2024-11-16 12:45:21,796 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-16 12:45:21,807 main DEBUG Took 0.010045 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-16 12:45:21,808 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-16 12:45:21,808 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-16 12:45:21,809 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-16 12:45:21,810 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 12:45:21,818 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-16 12:45:21,830 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 12:45:21,832 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 12:45:21,832 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 12:45:21,833 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 12:45:21,833 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 12:45:21,834 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 12:45:21,834 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 12:45:21,835 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 12:45:21,835 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 12:45:21,836 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 12:45:21,837 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 12:45:21,837 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 12:45:21,837 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 12:45:21,838 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-16 12:45:21,838 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 12:45:21,839 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 12:45:21,839 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 12:45:21,840 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 12:45:21,840 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 12:45:21,840 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 12:45:21,841 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 12:45:21,841 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 12:45:21,841 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 12:45:21,842 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 12:45:21,842 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 12:45:21,842 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-16 12:45:21,844 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 12:45:21,845 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-16 12:45:21,847 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-16 12:45:21,848 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-16 12:45:21,849 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-16 12:45:21,849 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-16 12:45:21,857 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-16 12:45:21,860 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-16 12:45:21,862 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-16 12:45:21,862 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-16 12:45:21,863 main DEBUG createAppenders(={Console}) 2024-11-16 12:45:21,864 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-16 12:45:21,864 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-16 12:45:21,864 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-16 12:45:21,865 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-16 12:45:21,865 main DEBUG OutputStream closed 2024-11-16 12:45:21,865 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-16 12:45:21,866 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-16 12:45:21,866 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-16 12:45:21,937 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-16 12:45:21,939 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-16 12:45:21,940 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-16 12:45:21,941 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-16 12:45:21,942 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-16 12:45:21,942 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-16 12:45:21,942 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-16 12:45:21,943 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-16 12:45:21,943 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-16 12:45:21,943 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-16 12:45:21,944 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-16 12:45:21,944 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-16 12:45:21,944 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-16 12:45:21,944 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-16 12:45:21,945 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-16 12:45:21,945 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-16 12:45:21,945 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-16 12:45:21,946 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-16 12:45:21,948 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-16 12:45:21,949 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-16 12:45:21,949 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-16 12:45:21,950 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-16T12:45:22,170 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa 2024-11-16 12:45:22,173 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-16 12:45:22,174 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
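[Editor's note] The entries above show Log4j2 building the test logging configuration from the log4j2.properties packaged in the hbase-logging tests jar (see the "Reconfiguration complete ... log4j2.properties" line): an HBaseTestAppender named Console writing to SYSTEM_ERR with the pattern %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n, a root logger at INFO routed to Console, and per-package levels such as org.apache.hadoop.hbase at DEBUG and org.apache.zookeeper at ERROR. As orientation only, here is a minimal sketch of roughly the same setup expressed with Log4j2's programmatic ConfigurationBuilder API; it is an assumption for illustration (it uses the stock Console appender rather than HBaseTestAppender) and is not taken from the HBase source.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.appender.ConsoleAppender;
    import org.apache.logging.log4j.core.config.Configurator;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
    import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

    public class TestLoggingSketch {
      public static void main(String[] args) {
        ConfigurationBuilder<BuiltConfiguration> builder =
            ConfigurationBuilderFactory.newConfigurationBuilder();
        // Console appender on stderr with the pattern seen in the log above.
        builder.add(builder.newAppender("Console", "Console")
            .addAttribute("target", ConsoleAppender.Target.SYSTEM_ERR)
            .add(builder.newLayout("PatternLayout")
                .addAttribute("pattern",
                    "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")));
        // Two of the per-package levels listed in the LoggerConfig builders above.
        builder.add(builder.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
        builder.add(builder.newLogger("org.apache.zookeeper", Level.ERROR));
        // Root logger at INFO, routed to the Console appender.
        builder.add(builder.newRootLogger(Level.INFO)
            .add(builder.newAppenderRef("Console")));
        Configurator.initialize(builder.build());
      }
    }

In the actual run, none of this is done in code: the PropertiesConfiguration is loaded from jar:file:.../hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties, as the reconfiguration entry above records.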
2024-11-16T12:45:22,182 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-16T12:45:22,220 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=266, ProcessCount=11, AvailableMemoryMB=4324 2024-11-16T12:45:22,223 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T12:45:22,244 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/cluster_b35c52b3-6cb1-7a8b-5887-65afa3b95417, deleteOnExit=true 2024-11-16T12:45:22,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T12:45:22,246 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/test.cache.data in system properties and HBase conf 2024-11-16T12:45:22,247 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T12:45:22,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/hadoop.log.dir in system properties and HBase conf 2024-11-16T12:45:22,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T12:45:22,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T12:45:22,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T12:45:22,351 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-16T12:45:22,466 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T12:45:22,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T12:45:22,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T12:45:22,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T12:45:22,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T12:45:22,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T12:45:22,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T12:45:22,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T12:45:22,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T12:45:22,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T12:45:22,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/nfs.dump.dir in system properties and HBase conf 2024-11-16T12:45:22,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/java.io.tmpdir in system properties and HBase conf 2024-11-16T12:45:22,479 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T12:45:22,480 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T12:45:22,480 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T12:45:22,937 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T12:45:23,533 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-16T12:45:23,602 INFO [Time-limited test {}] log.Log(170): Logging initialized @2488ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-16T12:45:23,665 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:45:23,720 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:45:23,737 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:45:23,737 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:45:23,738 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T12:45:23,750 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:45:23,753 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:45:23,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:45:23,930 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/java.io.tmpdir/jetty-localhost-35817-hadoop-hdfs-3_4_1-tests_jar-_-any-3321075380561704811/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T12:45:23,935 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:35817} 2024-11-16T12:45:23,936 INFO [Time-limited test {}] server.Server(415): Started @2823ms 2024-11-16T12:45:23,959 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T12:45:24,454 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:45:24,462 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:45:24,462 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:45:24,463 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:45:24,463 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T12:45:24,464 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:45:24,464 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:45:24,564 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/java.io.tmpdir/jetty-localhost-36323-hadoop-hdfs-3_4_1-tests_jar-_-any-17278510456864617823/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:45:24,565 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:36323} 2024-11-16T12:45:24,565 INFO [Time-limited test {}] server.Server(415): Started @3452ms 2024-11-16T12:45:24,617 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:45:24,729 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:45:24,736 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:45:24,737 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:45:24,737 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:45:24,737 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T12:45:24,738 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:45:24,739 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:45:24,841 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/java.io.tmpdir/jetty-localhost-40753-hadoop-hdfs-3_4_1-tests_jar-_-any-8756135533625344861/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:45:24,842 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:40753} 2024-11-16T12:45:24,843 INFO [Time-limited test {}] server.Server(415): Started @3729ms 2024-11-16T12:45:24,845 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
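[Editor's note] The block above shows HBaseTestingUtil standing up the HDFS side of the mini cluster requested at 12:45:22,223 (StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}); ZooKeeper, the master and the region server are started further down. As orientation, a minimal sketch of how a test typically drives this API follows, assuming the HBaseTestingUtil and StartMiniClusterOption classes named in the log; the method names reflect common HBase test code and are an assumption, not copied from TestLogRolling.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Mirrors the option dump above: 1 master, 1 region server, 2 DataNodes, 1 ZK server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // brings up mini DFS, ZooKeeper, master and region server
        try {
          // ... exercise the cluster through util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();    // tears the whole mini cluster back down
        }
      }
    }

The log also records deleteOnExit=true for the mini-cluster data directory, so the per-test directory under target/test-data is removed when the JVM exits.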
2024-11-16T12:45:25,641 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/cluster_b35c52b3-6cb1-7a8b-5887-65afa3b95417/data/data1/current/BP-335813976-172.17.0.2-1731761123019/current, will proceed with Du for space computation calculation, 2024-11-16T12:45:25,641 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/cluster_b35c52b3-6cb1-7a8b-5887-65afa3b95417/data/data4/current/BP-335813976-172.17.0.2-1731761123019/current, will proceed with Du for space computation calculation, 2024-11-16T12:45:25,641 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/cluster_b35c52b3-6cb1-7a8b-5887-65afa3b95417/data/data2/current/BP-335813976-172.17.0.2-1731761123019/current, will proceed with Du for space computation calculation, 2024-11-16T12:45:25,641 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/cluster_b35c52b3-6cb1-7a8b-5887-65afa3b95417/data/data3/current/BP-335813976-172.17.0.2-1731761123019/current, will proceed with Du for space computation calculation, 2024-11-16T12:45:25,675 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T12:45:25,675 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T12:45:25,719 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb51d70b080759801 with lease ID 0xb1840554c5f7bde1: Processing first storage report for DS-5993f530-0256-445a-b7b9-7b5096d9dafd from datanode DatanodeRegistration(127.0.0.1:39369, datanodeUuid=b1151d9b-99ed-41d9-bf9c-a2d4affbbce3, infoPort=39415, infoSecurePort=0, ipcPort=43717, storageInfo=lv=-57;cid=testClusterID;nsid=1118754805;c=1731761123019) 2024-11-16T12:45:25,720 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb51d70b080759801 with lease ID 0xb1840554c5f7bde1: from storage DS-5993f530-0256-445a-b7b9-7b5096d9dafd node DatanodeRegistration(127.0.0.1:39369, datanodeUuid=b1151d9b-99ed-41d9-bf9c-a2d4affbbce3, infoPort=39415, infoSecurePort=0, ipcPort=43717, storageInfo=lv=-57;cid=testClusterID;nsid=1118754805;c=1731761123019), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T12:45:25,720 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb8049900c8dda9a8 with lease ID 0xb1840554c5f7bde0: Processing first storage report for DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08 from datanode DatanodeRegistration(127.0.0.1:44635, datanodeUuid=ae7f4d77-29d3-4e05-9545-1d08a753d915, infoPort=43667, infoSecurePort=0, ipcPort=42063, storageInfo=lv=-57;cid=testClusterID;nsid=1118754805;c=1731761123019) 2024-11-16T12:45:25,721 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb8049900c8dda9a8 with lease ID 0xb1840554c5f7bde0: from storage DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08 node DatanodeRegistration(127.0.0.1:44635, datanodeUuid=ae7f4d77-29d3-4e05-9545-1d08a753d915, infoPort=43667, infoSecurePort=0, ipcPort=42063, storageInfo=lv=-57;cid=testClusterID;nsid=1118754805;c=1731761123019), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:45:25,721 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb51d70b080759801 with lease ID 0xb1840554c5f7bde1: Processing first storage report for DS-86107aa5-dc3e-4f0d-8a83-66eb331a920a from datanode DatanodeRegistration(127.0.0.1:39369, datanodeUuid=b1151d9b-99ed-41d9-bf9c-a2d4affbbce3, infoPort=39415, infoSecurePort=0, ipcPort=43717, storageInfo=lv=-57;cid=testClusterID;nsid=1118754805;c=1731761123019) 2024-11-16T12:45:25,721 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb51d70b080759801 with lease ID 0xb1840554c5f7bde1: from storage DS-86107aa5-dc3e-4f0d-8a83-66eb331a920a node DatanodeRegistration(127.0.0.1:39369, datanodeUuid=b1151d9b-99ed-41d9-bf9c-a2d4affbbce3, infoPort=39415, infoSecurePort=0, ipcPort=43717, storageInfo=lv=-57;cid=testClusterID;nsid=1118754805;c=1731761123019), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:45:25,722 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb8049900c8dda9a8 with lease ID 0xb1840554c5f7bde0: Processing first storage report for DS-824caf7d-6f40-4a2c-b5c1-e552c2dec77d from datanode DatanodeRegistration(127.0.0.1:44635, datanodeUuid=ae7f4d77-29d3-4e05-9545-1d08a753d915, infoPort=43667, infoSecurePort=0, ipcPort=42063, storageInfo=lv=-57;cid=testClusterID;nsid=1118754805;c=1731761123019) 2024-11-16T12:45:25,722 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xb8049900c8dda9a8 with lease ID 0xb1840554c5f7bde0: from storage DS-824caf7d-6f40-4a2c-b5c1-e552c2dec77d node DatanodeRegistration(127.0.0.1:44635, datanodeUuid=ae7f4d77-29d3-4e05-9545-1d08a753d915, infoPort=43667, infoSecurePort=0, ipcPort=42063, storageInfo=lv=-57;cid=testClusterID;nsid=1118754805;c=1731761123019), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:45:25,762 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa 2024-11-16T12:45:25,820 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/cluster_b35c52b3-6cb1-7a8b-5887-65afa3b95417/zookeeper_0, clientPort=57386, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/cluster_b35c52b3-6cb1-7a8b-5887-65afa3b95417/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/cluster_b35c52b3-6cb1-7a8b-5887-65afa3b95417/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T12:45:25,828 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57386 2024-11-16T12:45:25,836 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:45:25,839 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:45:26,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741825_1001 (size=7) 2024-11-16T12:45:26,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741825_1001 (size=7) 2024-11-16T12:45:26,433 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922 with version=8 2024-11-16T12:45:26,433 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/hbase-staging 2024-11-16T12:45:26,512 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-16T12:45:26,749 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0450ab8807f5:0 server-side Connection retries=45 2024-11-16T12:45:26,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:45:26,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T12:45:26,763 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T12:45:26,763 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:45:26,763 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T12:45:26,889 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T12:45:26,953 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-16T12:45:26,962 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-16T12:45:26,965 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T12:45:26,988 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 21280 (auto-detected) 2024-11-16T12:45:26,989 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-16T12:45:27,007 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34847 2024-11-16T12:45:27,035 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34847 connecting to ZooKeeper ensemble=127.0.0.1:57386 2024-11-16T12:45:27,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:348470x0, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T12:45:27,180 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34847-0x10144f74c720000 connected 2024-11-16T12:45:27,263 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:45:27,266 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:45:27,276 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:45:27,280 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922, hbase.cluster.distributed=false 2024-11-16T12:45:27,300 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T12:45:27,304 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34847 
2024-11-16T12:45:27,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34847 2024-11-16T12:45:27,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34847 2024-11-16T12:45:27,306 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34847 2024-11-16T12:45:27,306 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34847 2024-11-16T12:45:27,409 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0450ab8807f5:0 server-side Connection retries=45 2024-11-16T12:45:27,411 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:45:27,411 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T12:45:27,411 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T12:45:27,411 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:45:27,412 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T12:45:27,414 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T12:45:27,417 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T12:45:27,418 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33173 2024-11-16T12:45:27,421 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33173 connecting to ZooKeeper ensemble=127.0.0.1:57386 2024-11-16T12:45:27,422 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:45:27,427 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:45:27,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:331730x0, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T12:45:27,450 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:331730x0, quorum=127.0.0.1:57386, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:45:27,450 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:33173-0x10144f74c720001 connected 2024-11-16T12:45:27,453 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T12:45:27,460 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T12:45:27,462 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T12:45:27,466 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T12:45:27,467 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33173 2024-11-16T12:45:27,468 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33173 2024-11-16T12:45:27,468 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33173 2024-11-16T12:45:27,469 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33173 2024-11-16T12:45:27,469 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33173 2024-11-16T12:45:27,486 DEBUG [M:0;0450ab8807f5:34847 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0450ab8807f5:34847 2024-11-16T12:45:27,487 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0450ab8807f5,34847,1731761126600 2024-11-16T12:45:27,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:45:27,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:45:27,505 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0450ab8807f5,34847,1731761126600 2024-11-16T12:45:27,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T12:45:27,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:45:27,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:45:27,536 DEBUG 
[master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T12:45:27,537 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0450ab8807f5,34847,1731761126600 from backup master directory 2024-11-16T12:45:27,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:45:27,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0450ab8807f5,34847,1731761126600 2024-11-16T12:45:27,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:45:27,551 WARN [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T12:45:27,551 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0450ab8807f5,34847,1731761126600 2024-11-16T12:45:27,554 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-16T12:45:27,556 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-16T12:45:27,627 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/hbase.id] with ID: 0afe5e14-29f8-46c9-8fcc-a7636735a77c 2024-11-16T12:45:27,628 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/.tmp/hbase.id 2024-11-16T12:45:27,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741826_1002 (size=42) 2024-11-16T12:45:27,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741826_1002 (size=42) 2024-11-16T12:45:27,650 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/.tmp/hbase.id]:[hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/hbase.id] 2024-11-16T12:45:27,691 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:45:27,696 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-16T12:45:27,713 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-11-16T12:45:27,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:45:27,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:45:27,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741827_1003 (size=196) 2024-11-16T12:45:27,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741827_1003 (size=196) 2024-11-16T12:45:27,786 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T12:45:27,788 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T12:45:27,793 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:45:27,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741828_1004 (size=1189) 2024-11-16T12:45:27,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741828_1004 (size=1189) 2024-11-16T12:45:27,836 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store 2024-11-16T12:45:27,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741829_1005 (size=34) 2024-11-16T12:45:27,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741829_1005 (size=34) 2024-11-16T12:45:27,858 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-16T12:45:27,861 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:45:27,862 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T12:45:27,863 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:45:27,863 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:45:27,864 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T12:45:27,864 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:45:27,865 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
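[Editor's note] The 'master:store' descriptor dumped above is the standard HBase table/column-family model; for example, its 'info' family is declared with VERSIONS => '3', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL' and an 8 KB block size. Purely as an illustration of how such a family maps onto the public descriptor builder API (the master local region itself is created internally, not through client code), here is a minimal sketch; the table name is hypothetical.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        // Roughly the 'info' family settings shown in the master:store dump above.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example"))          // hypothetical table name
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)
                .build())
            .build();
        System.out.println(td);
      }
    }
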
2024-11-16T12:45:27,866 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731761127862Disabling compacts and flushes for region at 1731761127862Disabling writes for close at 1731761127864 (+2 ms)Writing region close event to WAL at 1731761127865 (+1 ms)Closed at 1731761127865 2024-11-16T12:45:27,868 WARN [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/.initializing 2024-11-16T12:45:27,868 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/WALs/0450ab8807f5,34847,1731761126600 2024-11-16T12:45:27,888 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C34847%2C1731761126600, suffix=, logDir=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/WALs/0450ab8807f5,34847,1731761126600, archiveDir=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/oldWALs, maxLogs=10 2024-11-16T12:45:27,895 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C34847%2C1731761126600.1731761127892 2024-11-16T12:45:27,914 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/WALs/0450ab8807f5,34847,1731761126600/0450ab8807f5%2C34847%2C1731761126600.1731761127892 2024-11-16T12:45:27,924 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39415:39415),(127.0.0.1/127.0.0.1:43667:43667)] 2024-11-16T12:45:27,926 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:45:27,926 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:45:27,930 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:45:27,931 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:45:27,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:45:27,993 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T12:45:27,996 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:45:27,998 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:45:27,998 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:45:28,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T12:45:28,002 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:45:28,003 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:45:28,003 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:45:28,006 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T12:45:28,006 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:45:28,007 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:45:28,007 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:45:28,010 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T12:45:28,010 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:45:28,011 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:45:28,011 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:45:28,015 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:45:28,016 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:45:28,021 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:45:28,022 DEBUG 
[master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:45:28,026 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T12:45:28,030 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:45:28,040 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:45:28,041 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=743834, jitterRate=-0.05416707694530487}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T12:45:28,049 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731761127942Initializing all the Stores at 1731761127944 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761127945 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761127945Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761127946 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761127946Cleaning up temporary data from old regions at 1731761128022 (+76 ms)Region opened successfully at 1731761128048 (+26 ms) 2024-11-16T12:45:28,050 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T12:45:28,080 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e3f4033, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0450ab8807f5/172.17.0.2:0 2024-11-16T12:45:28,108 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T12:45:28,117 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T12:45:28,117 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T12:45:28,120 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T12:45:28,121 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-16T12:45:28,126 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-16T12:45:28,126 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T12:45:28,148 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T12:45:28,156 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T12:45:28,215 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T12:45:28,220 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T12:45:28,223 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T12:45:28,234 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T12:45:28,237 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T12:45:28,241 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T12:45:28,251 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T12:45:28,254 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-11-16T12:45:28,265 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T12:45:28,281 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T12:45:28,290 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T12:45:28,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T12:45:28,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T12:45:28,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:45:28,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:45:28,306 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0450ab8807f5,34847,1731761126600, sessionid=0x10144f74c720000, setting cluster-up flag (Was=false) 2024-11-16T12:45:28,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:45:28,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:45:28,360 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T12:45:28,365 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0450ab8807f5,34847,1731761126600 2024-11-16T12:45:28,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:45:28,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:45:28,409 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T12:45:28,412 DEBUG 
[master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0450ab8807f5,34847,1731761126600 2024-11-16T12:45:28,421 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T12:45:28,474 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer(746): ClusterId : 0afe5e14-29f8-46c9-8fcc-a7636735a77c 2024-11-16T12:45:28,477 DEBUG [RS:0;0450ab8807f5:33173 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T12:45:28,488 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T12:45:28,497 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T12:45:28,504 DEBUG [RS:0;0450ab8807f5:33173 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T12:45:28,504 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
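Annotation (illustrative sketch): the balancer lines above report slop=0.2 and StochasticLoadBalancer settings maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000. Assuming the usual configuration keys for these knobs (the property names below are my assumption; only the values come from the log), the tuning would look roughly like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Values echo the balancer settings logged above; the keys are assumed, not taken from this run.
        conf.setFloat("hbase.regions.slop", 0.2f);
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
        return conf;
      }
    }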
2024-11-16T12:45:28,504 DEBUG [RS:0;0450ab8807f5:33173 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T12:45:28,509 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0450ab8807f5,34847,1731761126600 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T12:45:28,517 DEBUG [RS:0;0450ab8807f5:33173 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T12:45:28,517 DEBUG [RS:0;0450ab8807f5:33173 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1944a1f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0450ab8807f5/172.17.0.2:0 2024-11-16T12:45:28,518 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:45:28,518 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:45:28,518 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:45:28,518 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:45:28,518 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0450ab8807f5:0, corePoolSize=10, maxPoolSize=10 2024-11-16T12:45:28,519 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:45:28,519 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0450ab8807f5:0, corePoolSize=2, maxPoolSize=2 2024-11-16T12:45:28,519 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:45:28,522 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731761158522 2024-11-16T12:45:28,524 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T12:45:28,525 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:45:28,525 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T12:45:28,525 INFO 
[master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T12:45:28,529 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T12:45:28,530 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T12:45:28,530 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:45:28,530 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T12:45:28,531 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T12:45:28,530 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T12:45:28,531 DEBUG [RS:0;0450ab8807f5:33173 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0450ab8807f5:33173 2024-11-16T12:45:28,531 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:28,534 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T12:45:28,534 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T12:45:28,535 DEBUG [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer(832): About to register with Master. 
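Annotation (illustrative sketch): the CleanerChore entries above initialize the WAL and HFile cleaner delegates one by one (TimeToLiveLogCleaner, ReplicationLogCleaner, HFileLinkCleaner, SnapshotHFileCleaner, TimeToLiveHFileCleaner, and the master-local-store variants), and the LogsCleaner chore runs every 600000 ms. Assuming the conventional plugin-list keys (the key names are my assumption; only the delegate class names and the period are from the log), the chain is configured roughly like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerChoreSketch {
      public static Configuration withCleaners() {
        Configuration conf = HBaseConfiguration.create();
        // Delegate classes are the ones the chore reports initializing above.
        conf.set("hbase.master.logcleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
          + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
        conf.set("hbase.master.hfilecleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner,"
          + "org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner,"
          + "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
        conf.setInt("hbase.master.cleaner.interval", 600_000); // matches period=600000 ms above
        return conf;
      }
    }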
2024-11-16T12:45:28,535 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T12:45:28,536 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T12:45:28,536 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T12:45:28,538 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer(2659): reportForDuty to master=0450ab8807f5,34847,1731761126600 with port=33173, startcode=1731761127376 2024-11-16T12:45:28,540 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T12:45:28,540 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T12:45:28,542 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761128541,5,FailOnTimeoutGroup] 2024-11-16T12:45:28,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741831_1007 (size=1321) 2024-11-16T12:45:28,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741831_1007 (size=1321) 2024-11-16T12:45:28,545 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T12:45:28,546 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE 
=> 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922 2024-11-16T12:45:28,546 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761128542,5,FailOnTimeoutGroup] 2024-11-16T12:45:28,547 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:28,547 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T12:45:28,548 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:28,548 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:28,552 DEBUG [RS:0;0450ab8807f5:33173 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T12:45:28,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741832_1008 (size=32) 2024-11-16T12:45:28,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741832_1008 (size=32) 2024-11-16T12:45:28,559 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:45:28,562 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T12:45:28,564 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T12:45:28,564 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:45:28,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:45:28,566 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T12:45:28,569 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T12:45:28,569 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:45:28,570 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:45:28,570 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T12:45:28,572 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T12:45:28,572 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:45:28,573 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:45:28,574 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T12:45:28,576 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T12:45:28,576 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:45:28,578 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:45:28,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T12:45:28,579 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740 2024-11-16T12:45:28,581 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740 2024-11-16T12:45:28,585 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T12:45:28,585 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T12:45:28,586 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
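Annotation (illustrative sketch): the FlushLargeStoresPolicy lines above fall back to the region memstore flush size divided by the number of families (32.0 M for master:store, 16.0 M for hbase:meta) because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. A table that wants an explicit per-family lower bound could set that key on its descriptor roughly as follows; the 'demo' table and the 16 MB value are hypothetical, only the property name comes from the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBoundSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            // Property name taken from the FlushLargeStoresPolicy message above; the value is hypothetical.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
      }
    }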
2024-11-16T12:45:28,589 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T12:45:28,594 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:45:28,595 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=717761, jitterRate=-0.08732050657272339}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T12:45:28,601 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731761128560Initializing all the Stores at 1731761128561 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761128561Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761128561Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761128562 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761128562Cleaning up temporary data from old regions at 1731761128585 (+23 ms)Region opened successfully at 1731761128601 (+16 ms) 2024-11-16T12:45:28,601 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T12:45:28,601 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T12:45:28,602 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T12:45:28,602 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T12:45:28,602 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T12:45:28,603 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T12:45:28,603 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731761128601Disabling compacts and flushes for region at 1731761128601Disabling writes for close at 1731761128602 (+1 
ms)Writing region close event to WAL at 1731761128603 (+1 ms)Closed at 1731761128603 2024-11-16T12:45:28,607 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:45:28,607 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T12:45:28,615 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T12:45:28,623 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T12:45:28,626 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T12:45:28,628 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35647, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T12:45:28,634 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34847 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0450ab8807f5,33173,1731761127376 2024-11-16T12:45:28,637 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34847 {}] master.ServerManager(517): Registering regionserver=0450ab8807f5,33173,1731761127376 2024-11-16T12:45:28,648 DEBUG [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922 2024-11-16T12:45:28,649 DEBUG [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36417 2024-11-16T12:45:28,649 DEBUG [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T12:45:28,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T12:45:28,685 DEBUG [RS:0;0450ab8807f5:33173 {}] zookeeper.ZKUtil(111): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0450ab8807f5,33173,1731761127376 2024-11-16T12:45:28,685 WARN [RS:0;0450ab8807f5:33173 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
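Annotation (illustrative sketch): the ZKWatcher entries scattered through this log (NodeCreated and NodeChildrenChanged on znodes under /hbase such as /hbase/running and /hbase/rs) are ordinary ZooKeeper watch notifications delivered on the client's event thread. A minimal stand-alone sketch of the same mechanism with the stock ZooKeeper client; the quorum address is the one seen in the log, everything else is hypothetical.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // 127.0.0.1:57386 is the quorum string seen in the log; received events are simply printed.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57386", 30_000, (WatchedEvent e) -> {
          if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
          System.out.println("type=" + e.getType() + ", state=" + e.getState() + ", path=" + e.getPath());
        });
        connected.await();
        zk.getChildren("/hbase", true); // watch=true arms a one-shot NodeChildrenChanged watch on /hbase
        Thread.sleep(10_000);           // keep the session alive long enough to observe events
        zk.close();
      }
    }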
2024-11-16T12:45:28,686 INFO [RS:0;0450ab8807f5:33173 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:45:28,686 DEBUG [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376 2024-11-16T12:45:28,688 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0450ab8807f5,33173,1731761127376] 2024-11-16T12:45:28,712 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T12:45:28,726 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T12:45:28,731 INFO [RS:0;0450ab8807f5:33173 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T12:45:28,731 INFO [RS:0;0450ab8807f5:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:28,732 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T12:45:28,737 INFO [RS:0;0450ab8807f5:33173 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T12:45:28,738 INFO [RS:0;0450ab8807f5:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:28,738 DEBUG [RS:0;0450ab8807f5:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:45:28,738 DEBUG [RS:0;0450ab8807f5:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:45:28,738 DEBUG [RS:0;0450ab8807f5:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:45:28,738 DEBUG [RS:0;0450ab8807f5:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:45:28,739 DEBUG [RS:0;0450ab8807f5:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:45:28,739 DEBUG [RS:0;0450ab8807f5:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0450ab8807f5:0, corePoolSize=2, maxPoolSize=2 2024-11-16T12:45:28,739 DEBUG [RS:0;0450ab8807f5:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:45:28,739 DEBUG [RS:0;0450ab8807f5:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:45:28,739 DEBUG [RS:0;0450ab8807f5:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0450ab8807f5:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T12:45:28,739 DEBUG [RS:0;0450ab8807f5:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:45:28,739 DEBUG [RS:0;0450ab8807f5:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:45:28,739 DEBUG [RS:0;0450ab8807f5:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:45:28,740 DEBUG [RS:0;0450ab8807f5:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0450ab8807f5:0, corePoolSize=3, maxPoolSize=3 2024-11-16T12:45:28,740 DEBUG [RS:0;0450ab8807f5:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0, corePoolSize=3, maxPoolSize=3 2024-11-16T12:45:28,740 INFO [RS:0;0450ab8807f5:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:28,741 INFO [RS:0;0450ab8807f5:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:28,741 INFO [RS:0;0450ab8807f5:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:28,741 INFO [RS:0;0450ab8807f5:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:28,741 INFO [RS:0;0450ab8807f5:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:28,741 INFO [RS:0;0450ab8807f5:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,33173,1731761127376-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T12:45:28,757 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T12:45:28,758 INFO [RS:0;0450ab8807f5:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,33173,1731761127376-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:28,758 INFO [RS:0;0450ab8807f5:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:28,759 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.Replication(171): 0450ab8807f5,33173,1731761127376 started 2024-11-16T12:45:28,775 INFO [RS:0;0450ab8807f5:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:28,776 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer(1482): Serving as 0450ab8807f5,33173,1731761127376, RpcServer on 0450ab8807f5/172.17.0.2:33173, sessionid=0x10144f74c720001 2024-11-16T12:45:28,776 WARN [0450ab8807f5:34847 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
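Annotation (illustrative sketch): the MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low mark of 836 M, i.e. the low mark is about 95% of the global limit. Those figures are normally derived from heap-fraction settings; the keys and fractions below are my assumption, shown only to relate the two logged numbers.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap shared by all memstores, and the
        // lower-mark fraction of that limit (836 M is roughly 0.95 x 880 M above).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        return conf;
      }
    }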
2024-11-16T12:45:28,777 DEBUG [RS:0;0450ab8807f5:33173 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T12:45:28,777 DEBUG [RS:0;0450ab8807f5:33173 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0450ab8807f5,33173,1731761127376 2024-11-16T12:45:28,777 DEBUG [RS:0;0450ab8807f5:33173 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0450ab8807f5,33173,1731761127376' 2024-11-16T12:45:28,777 DEBUG [RS:0;0450ab8807f5:33173 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T12:45:28,778 DEBUG [RS:0;0450ab8807f5:33173 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T12:45:28,779 DEBUG [RS:0;0450ab8807f5:33173 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T12:45:28,779 DEBUG [RS:0;0450ab8807f5:33173 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T12:45:28,779 DEBUG [RS:0;0450ab8807f5:33173 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0450ab8807f5,33173,1731761127376 2024-11-16T12:45:28,779 DEBUG [RS:0;0450ab8807f5:33173 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0450ab8807f5,33173,1731761127376' 2024-11-16T12:45:28,780 DEBUG [RS:0;0450ab8807f5:33173 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T12:45:28,780 DEBUG [RS:0;0450ab8807f5:33173 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T12:45:28,781 DEBUG [RS:0;0450ab8807f5:33173 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T12:45:28,781 INFO [RS:0;0450ab8807f5:33173 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T12:45:28,781 INFO [RS:0;0450ab8807f5:33173 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
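Annotation (illustrative sketch): the last two entries above show both quota managers standing down because quota support is off. Assuming the usual enable flag (the key below is my assumption), turning quotas on would let RegionServerRpcQuotaManager and RegionServerSpaceQuotaManager start instead of logging "Quota support disabled".

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaEnableSketch {
      public static Configuration withQuotas() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.quota.enabled", true); // assumed key; enables the quota managers named above
        return conf;
      }
    }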
2024-11-16T12:45:28,891 INFO [RS:0;0450ab8807f5:33173 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C33173%2C1731761127376, suffix=, logDir=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376, archiveDir=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/oldWALs, maxLogs=32 2024-11-16T12:45:28,895 INFO [RS:0;0450ab8807f5:33173 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33173%2C1731761127376.1731761128895 2024-11-16T12:45:28,903 INFO [RS:0;0450ab8807f5:33173 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761128895 2024-11-16T12:45:28,906 DEBUG [RS:0;0450ab8807f5:33173 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43667:43667),(127.0.0.1/127.0.0.1:39415:39415)] 2024-11-16T12:45:29,031 DEBUG [0450ab8807f5:34847 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T12:45:29,042 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0450ab8807f5,33173,1731761127376 2024-11-16T12:45:29,047 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0450ab8807f5,33173,1731761127376, state=OPENING 2024-11-16T12:45:29,099 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T12:45:29,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:45:29,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:45:29,112 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:45:29,113 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:45:29,115 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T12:45:29,119 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0450ab8807f5,33173,1731761127376}] 2024-11-16T12:45:29,299 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T12:45:29,302 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41997, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T12:45:29,311 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T12:45:29,312 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:45:29,316 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C33173%2C1731761127376.meta, suffix=.meta, logDir=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376, archiveDir=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/oldWALs, maxLogs=32 2024-11-16T12:45:29,318 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33173%2C1731761127376.meta.1731761129317.meta 2024-11-16T12:45:29,324 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.meta.1731761129317.meta 2024-11-16T12:45:29,327 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39415:39415),(127.0.0.1/127.0.0.1:43667:43667)] 2024-11-16T12:45:29,330 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:45:29,332 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T12:45:29,335 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T12:45:29,340 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
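Editor's note: both WAL instances above are created with blocksize=256 MB, rollsize=128 MB and maxLogs=32; a roll size of half the block size is consistent with the usual multiplier-based derivation (roll size = block size x multiplier, with 0.5 as the default I recall). The snippet below shows how such settings would be supplied through the configuration; the exact property names are quoted from memory and should be treated as assumptions, not verified against this build.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Assumed property names for the values reported in the log:
        // 256 MB WAL block size, a 0.5 roll multiplier (yielding the 128 MB
        // roll size), and a cap of 32 WAL files per region server.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        conf.setInt("hbase.regionserver.maxlogs", 32);

        System.out.println("maxlogs = " + conf.get("hbase.regionserver.maxlogs"));
    }
}
```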
2024-11-16T12:45:29,343 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T12:45:29,344 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:45:29,344 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T12:45:29,344 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T12:45:29,347 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T12:45:29,348 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T12:45:29,348 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:45:29,349 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:45:29,349 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T12:45:29,351 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T12:45:29,351 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:45:29,352 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:45:29,352 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T12:45:29,354 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T12:45:29,354 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:45:29,355 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:45:29,355 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T12:45:29,356 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T12:45:29,356 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:45:29,357 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
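Editor's note: every store opened above prints the same CompactionConfiguration line: minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0. Tuning those numbers goes through the hbase.hstore.compaction.* properties; the sketch below uses the key names as I recall them, so treat them as assumptions rather than a checked reference.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Values echoing the CompactionConfiguration line in the log.
        conf.setInt("hbase.hstore.compaction.min", 3);       // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);      // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);

        System.out.println("ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
    }
}
```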
2024-11-16T12:45:29,357 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T12:45:29,359 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740 2024-11-16T12:45:29,361 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740 2024-11-16T12:45:29,363 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T12:45:29,363 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T12:45:29,364 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T12:45:29,366 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T12:45:29,368 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=820371, jitterRate=0.04315638542175293}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T12:45:29,368 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T12:45:29,369 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731761129345Writing region info on filesystem at 1731761129345Initializing all the Stores at 1731761129346 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761129346Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761129347 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761129347Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761129347Cleaning up temporary data from old regions at 1731761129363 (+16 ms)Running coprocessor post-open hooks at 1731761129368 (+5 ms)Region opened successfully at 1731761129369 (+1 ms) 2024-11-16T12:45:29,374 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731761129289 2024-11-16T12:45:29,384 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T12:45:29,385 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T12:45:29,386 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0450ab8807f5,33173,1731761127376 2024-11-16T12:45:29,388 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0450ab8807f5,33173,1731761127376, state=OPEN 2024-11-16T12:45:29,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T12:45:29,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T12:45:29,450 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:45:29,450 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:45:29,450 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0450ab8807f5,33173,1731761127376 2024-11-16T12:45:29,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T12:45:29,456 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0450ab8807f5,33173,1731761127376 in 332 msec 2024-11-16T12:45:29,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T12:45:29,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 843 msec 2024-11-16T12:45:29,464 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:45:29,464 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T12:45:29,482 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T12:45:29,483 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0450ab8807f5,33173,1731761127376, seqNum=-1] 2024-11-16T12:45:29,500 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T12:45:29,501 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55701, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T12:45:29,521 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0720 sec 2024-11-16T12:45:29,522 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731761129521, completionTime=-1 2024-11-16T12:45:29,524 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T12:45:29,524 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-16T12:45:29,550 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-16T12:45:29,550 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731761189550 2024-11-16T12:45:29,550 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731761249550 2024-11-16T12:45:29,550 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 25 msec 2024-11-16T12:45:29,553 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,34847,1731761126600-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:29,553 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,34847,1731761126600-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:29,553 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,34847,1731761126600-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:29,555 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0450ab8807f5:34847, period=300000, unit=MILLISECONDS is enabled. 
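Editor's note: once hbase:meta is open, InitMetaProcedure (logged above) creates the default and hbase namespaces. A client can create its own namespaces with the same Admin call; this is a hedged sketch using the public API, with the namespace name invented for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Equivalent of what InitMetaProcedure does for 'default' and 'hbase',
            // here for a hypothetical application namespace.
            admin.createNamespace(NamespaceDescriptor.create("testlogs").build());
            for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                System.out.println("namespace: " + ns.getName());
            }
        }
    }
}
```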
2024-11-16T12:45:29,555 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:29,556 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T12:45:29,561 DEBUG [master/0450ab8807f5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T12:45:29,584 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.032sec 2024-11-16T12:45:29,585 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T12:45:29,586 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T12:45:29,587 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T12:45:29,588 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T12:45:29,588 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T12:45:29,589 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,34847,1731761126600-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T12:45:29,589 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,34847,1731761126600-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T12:45:29,597 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T12:45:29,598 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T12:45:29,599 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,34847,1731761126600-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T12:45:29,686 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@358a6cb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:45:29,689 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-16T12:45:29,689 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-16T12:45:29,693 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0450ab8807f5,34847,-1 for getting cluster id 2024-11-16T12:45:29,697 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T12:45:29,705 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0afe5e14-29f8-46c9-8fcc-a7636735a77c' 2024-11-16T12:45:29,709 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T12:45:29,709 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0afe5e14-29f8-46c9-8fcc-a7636735a77c" 2024-11-16T12:45:29,712 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ff7fb09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:45:29,712 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0450ab8807f5,34847,-1] 2024-11-16T12:45:29,714 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T12:45:29,717 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:45:29,718 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57122, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T12:45:29,721 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19f64e80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:45:29,721 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T12:45:29,728 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0450ab8807f5,33173,1731761127376, seqNum=-1] 2024-11-16T12:45:29,729 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T12:45:29,731 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41558, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T12:45:29,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=0450ab8807f5,34847,1731761126600 2024-11-16T12:45:29,750 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:45:29,757 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T12:45:29,761 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T12:45:29,766 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 0450ab8807f5,34847,1731761126600 2024-11-16T12:45:29,768 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2081a17b 2024-11-16T12:45:29,769 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T12:45:29,772 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57134, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T12:45:29,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34847 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T12:45:29,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34847 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
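Editor's note: once the minicluster reports itself up, the test turns the balancer off (balanceSwitch=false above) before creating a table whose MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) are deliberately tiny, exactly what TableDescriptorChecker warns about, since the test wants frequent flushes and WAL rolls. A client-side equivalent of the balancer call, sketched under the assumption that the HBase 2.x+ Admin API is in use:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BalancerSwitchSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Disable the balancer synchronously, as the test does before
            // exercising log rolling; returns the previous switch state.
            boolean previous = admin.balancerSwitch(false, true);
            System.out.println("balancer was previously " + (previous ? "on" : "off"));
        }
    }
}
```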
2024-11-16T12:45:29,779 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34847 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T12:45:29,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34847 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-16T12:45:29,791 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T12:45:29,793 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34847 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-16T12:45:29,793 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:45:29,796 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T12:45:29,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34847 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T12:45:29,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741835_1011 (size=389) 2024-11-16T12:45:29,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741835_1011 (size=389) 2024-11-16T12:45:29,846 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 74dd56f7af29124799c6b75ebf614235, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922 2024-11-16T12:45:29,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741836_1012 (size=72) 2024-11-16T12:45:29,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741836_1012 (size=72) 2024-11-16T12:45:29,859 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:45:29,859 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 74dd56f7af29124799c6b75ebf614235, disabling compactions & flushes 2024-11-16T12:45:29,860 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235. 2024-11-16T12:45:29,860 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235. 2024-11-16T12:45:29,860 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235. after waiting 0 ms 2024-11-16T12:45:29,860 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235. 2024-11-16T12:45:29,860 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235. 2024-11-16T12:45:29,860 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 74dd56f7af29124799c6b75ebf614235: Waiting for close lock at 1731761129859Disabling compacts and flushes for region at 1731761129859Disabling writes for close at 1731761129860 (+1 ms)Writing region close event to WAL at 1731761129860Closed at 1731761129860 2024-11-16T12:45:29,863 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T12:45:29,892 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731761129863"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731761129863"}]},"ts":"1731761129863"} 2024-11-16T12:45:29,898 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
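Editor's note: the create request above spells out the table the test uses: a single 'info' family with VERSIONS=1, a ROW bloom filter and 64 KB blocks, alongside the small MAX_FILESIZE/MEMSTORE_FLUSHSIZE values flagged earlier (whether the test sets those on the descriptor or in the configuration is not visible here; the sketch puts them on the descriptor). A hedged sketch of building and creating an equivalent descriptor through the public builder API:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
            admin.createTable(TableDescriptorBuilder.newBuilder(name)
                .setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(1)
                    .setBloomFilterType(BloomType.ROW)
                    .setBlocksize(64 * 1024)
                    .build())
                // Deliberately small limits, matching the warnings logged earlier.
                .setMaxFileSize(786432)
                .setMemStoreFlushSize(8192)
                .build());
        }
    }
}
```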
2024-11-16T12:45:29,901 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T12:45:29,904 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731761129901"}]},"ts":"1731761129901"} 2024-11-16T12:45:29,910 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-16T12:45:29,916 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=74dd56f7af29124799c6b75ebf614235, ASSIGN}] 2024-11-16T12:45:29,920 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=74dd56f7af29124799c6b75ebf614235, ASSIGN 2024-11-16T12:45:29,921 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=74dd56f7af29124799c6b75ebf614235, ASSIGN; state=OFFLINE, location=0450ab8807f5,33173,1731761127376; forceNewPlan=false, retain=false 2024-11-16T12:45:30,074 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=74dd56f7af29124799c6b75ebf614235, regionState=OPENING, regionLocation=0450ab8807f5,33173,1731761127376 2024-11-16T12:45:30,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=74dd56f7af29124799c6b75ebf614235, ASSIGN because future has completed 2024-11-16T12:45:30,082 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 74dd56f7af29124799c6b75ebf614235, server=0450ab8807f5,33173,1731761127376}] 2024-11-16T12:45:30,242 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235. 
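Editor's note: while the master walks CreateTableProcedure through region assignment in the records above, the client side of the log simply polls "Checking to see if procedure is done pid=4". From the public API an explicit wait would look roughly like the sketch below; this is illustrative, not what the test itself runs.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WaitForTableSketch {
    public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Poll until every region of the table is assigned and open.
            while (!admin.isTableAvailable(name)) {
                Thread.sleep(200);
            }
            System.out.println(name + " is available");
        }
    }
}
```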
2024-11-16T12:45:30,243 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 74dd56f7af29124799c6b75ebf614235, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235.', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:45:30,243 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 74dd56f7af29124799c6b75ebf614235 2024-11-16T12:45:30,243 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:45:30,243 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 74dd56f7af29124799c6b75ebf614235 2024-11-16T12:45:30,244 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 74dd56f7af29124799c6b75ebf614235 2024-11-16T12:45:30,246 INFO [StoreOpener-74dd56f7af29124799c6b75ebf614235-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 74dd56f7af29124799c6b75ebf614235 2024-11-16T12:45:30,248 INFO [StoreOpener-74dd56f7af29124799c6b75ebf614235-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 74dd56f7af29124799c6b75ebf614235 columnFamilyName info 2024-11-16T12:45:30,248 DEBUG [StoreOpener-74dd56f7af29124799c6b75ebf614235-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:45:30,249 INFO [StoreOpener-74dd56f7af29124799c6b75ebf614235-1 {}] regionserver.HStore(327): Store=74dd56f7af29124799c6b75ebf614235/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:45:30,250 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 74dd56f7af29124799c6b75ebf614235 2024-11-16T12:45:30,251 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235 2024-11-16T12:45:30,251 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235 2024-11-16T12:45:30,252 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 74dd56f7af29124799c6b75ebf614235 2024-11-16T12:45:30,252 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 74dd56f7af29124799c6b75ebf614235 2024-11-16T12:45:30,255 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 74dd56f7af29124799c6b75ebf614235 2024-11-16T12:45:30,258 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:45:30,259 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 74dd56f7af29124799c6b75ebf614235; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=843334, jitterRate=0.07235552370548248}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T12:45:30,259 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 74dd56f7af29124799c6b75ebf614235 2024-11-16T12:45:30,260 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 74dd56f7af29124799c6b75ebf614235: Running coprocessor pre-open hook at 1731761130244Writing region info on filesystem at 1731761130244Initializing all the Stores at 1731761130245 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761130246 (+1 ms)Cleaning up temporary data from old regions at 1731761130252 (+6 ms)Running coprocessor post-open hooks at 1731761130259 (+7 ms)Region opened successfully at 1731761130260 (+1 ms) 2024-11-16T12:45:30,262 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235., pid=6, masterSystemTime=1731761130237 2024-11-16T12:45:30,266 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235. 2024-11-16T12:45:30,266 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235. 2024-11-16T12:45:30,267 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=74dd56f7af29124799c6b75ebf614235, regionState=OPEN, openSeqNum=2, regionLocation=0450ab8807f5,33173,1731761127376 2024-11-16T12:45:30,271 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 74dd56f7af29124799c6b75ebf614235, server=0450ab8807f5,33173,1731761127376 because future has completed 2024-11-16T12:45:30,277 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T12:45:30,277 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 74dd56f7af29124799c6b75ebf614235, server=0450ab8807f5,33173,1731761127376 in 191 msec 2024-11-16T12:45:30,281 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T12:45:30,281 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=74dd56f7af29124799c6b75ebf614235, ASSIGN in 364 msec 2024-11-16T12:45:30,283 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T12:45:30,284 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731761130283"}]},"ts":"1731761130283"} 2024-11-16T12:45:30,288 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-16T12:45:30,290 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T12:45:30,293 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 508 msec 2024-11-16T12:45:34,856 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-16T12:45:34,912 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T12:45:34,914 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-16T12:45:36,951 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T12:45:36,951 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T12:45:36,953 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-16T12:45:36,953 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-16T12:45:36,955 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T12:45:36,955 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T12:45:36,955 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T12:45:36,955 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-16T12:45:39,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34847 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T12:45:39,816 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-16T12:45:39,819 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-16T12:45:39,828 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-16T12:45:39,829 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235. 
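Editor's note: at this point the client scans hbase:meta for the regions of the new table and reports exactly one, with its firstRegionName printed above. The same information is available from Admin; a hedged sketch assuming the 2.x+ getRegions call:

```java
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class ListRegionsSketch {
    public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            List<RegionInfo> regions = admin.getRegions(name);
            System.out.println("Found " + regions.size() + " region(s)");
            // The log above shows a single region covering the whole key space.
            System.out.println("first region: " + regions.get(0).getRegionNameAsString());
        }
    }
}
```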
2024-11-16T12:45:39,829 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33173%2C1731761127376.1731761139829 2024-11-16T12:45:39,850 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:45:39,850 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:45:39,850 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:45:39,850 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:45:39,850 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:45:39,851 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761128895 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761139829 2024-11-16T12:45:39,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741833_1009 (size=451) 2024-11-16T12:45:39,863 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43667:43667),(127.0.0.1/127.0.0.1:39415:39415)] 2024-11-16T12:45:39,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741833_1009 (size=451) 2024-11-16T12:45:39,863 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761128895 is not closed yet, will try archiving it next time 2024-11-16T12:45:39,874 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235., hostname=0450ab8807f5,33173,1731761127376, seqNum=2] 2024-11-16T12:45:40,259 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761128895 to hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/oldWALs/0450ab8807f5%2C33173%2C1731761127376.1731761128895 2024-11-16T12:45:51,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8855): Flush requested on 74dd56f7af29124799c6b75ebf614235 2024-11-16T12:45:51,951 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 74dd56f7af29124799c6b75ebf614235 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T12:45:52,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp/info/5de943eceee242f2b30864e7e6e8a11a is 1080, key is row0001/info:/1731761139878/Put/seqid=0 2024-11-16T12:45:52,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741838_1014 (size=12509) 2024-11-16T12:45:52,075 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741838_1014 (size=12509) 2024-11-16T12:45:52,076 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp/info/5de943eceee242f2b30864e7e6e8a11a 2024-11-16T12:45:52,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp/info/5de943eceee242f2b30864e7e6e8a11a as hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/5de943eceee242f2b30864e7e6e8a11a 2024-11-16T12:45:52,164 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/5de943eceee242f2b30864e7e6e8a11a, entries=7, sequenceid=11, filesize=12.2 K 2024-11-16T12:45:52,174 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 74dd56f7af29124799c6b75ebf614235 in 219ms, sequenceid=11, compaction requested=false 2024-11-16T12:45:52,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 74dd56f7af29124799c6b75ebf614235: 2024-11-16T12:45:55,760 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
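Editor's note: the flush above follows a write-to-temp-then-commit pattern: the memstore is written under .tmp/info/ and then committed into info/ once complete. Below is a minimal sketch of that pattern using an ordinary sorted map and an atomic rename; the directory layout and payload format are placeholders, not HBase's HFile writer.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.*;
    import java.util.Map;
    import java.util.TreeMap;

    // Illustrative only: flush an in-memory sorted map to a temp file, then
    // atomically move it into the store directory, mirroring the log's
    // ".tmp/info/<file>" -> "info/<file>" commit step.
    public class FlushSketch {
        static Path flush(TreeMap<String, String> memstore, Path storeDir) throws IOException {
            Path tmpDir = storeDir.resolve(".tmp");
            Files.createDirectories(tmpDir);
            Path tmpFile = Files.createTempFile(tmpDir, "flush-", ".kv");
            StringBuilder sb = new StringBuilder();
            for (Map.Entry<String, String> e : memstore.entrySet()) {
                sb.append(e.getKey()).append('\t').append(e.getValue()).append('\n');
            }
            Files.write(tmpFile, sb.toString().getBytes(StandardCharsets.UTF_8));
            Path committed = storeDir.resolve(tmpFile.getFileName());
            // An atomic rename makes the new file visible to readers all at once.
            return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
        }

        public static void main(String[] args) throws IOException {
            TreeMap<String, String> memstore = new TreeMap<>();
            memstore.put("row0001", "value1");
            memstore.put("row0002", "value2");
            Path out = flush(memstore, Files.createTempDirectory("store"));
            System.out.println("Flushed to " + out + " (" + Files.size(out) + " bytes)");
        }
    }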
2024-11-16T12:46:00,008 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33173%2C1731761127376.1731761160007 2024-11-16T12:46:00,216 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK], DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK]] 2024-11-16T12:46:00,216 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:00,217 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:00,217 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:00,217 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:00,217 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:00,217 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761139829 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761160007 2024-11-16T12:46:00,218 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39415:39415),(127.0.0.1/127.0.0.1:43667:43667)] 2024-11-16T12:46:00,218 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761139829 is not closed yet, will try archiving it next time 2024-11-16T12:46:00,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741837_1013 (size=12399) 2024-11-16T12:46:00,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741837_1013 (size=12399) 2024-11-16T12:46:00,422 INFO [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK], DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK]] 2024-11-16T12:46:02,628 INFO [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK], DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK]] 2024-11-16T12:46:04,837 INFO [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK], DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK]] 2024-11-16T12:46:07,044 INFO [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK], DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK]] 2024-11-16T12:46:07,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8855): Flush requested on 74dd56f7af29124799c6b75ebf614235 2024-11-16T12:46:07,044 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 74dd56f7af29124799c6b75ebf614235 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T12:46:07,247 INFO [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK], DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK]] 2024-11-16T12:46:07,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp/info/5c409731945a4b31b834446612ae4228 is 1080, key is row0008/info:/1731761153952/Put/seqid=0 2024-11-16T12:46:07,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741840_1016 (size=12509) 2024-11-16T12:46:07,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741840_1016 (size=12509) 2024-11-16T12:46:07,268 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp/info/5c409731945a4b31b834446612ae4228 2024-11-16T12:46:07,280 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp/info/5c409731945a4b31b834446612ae4228 as hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/5c409731945a4b31b834446612ae4228 2024-11-16T12:46:07,292 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/5c409731945a4b31b834446612ae4228, entries=7, sequenceid=21, filesize=12.2 K 2024-11-16T12:46:07,494 INFO [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK], DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK]] 2024-11-16T12:46:07,494 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 74dd56f7af29124799c6b75ebf614235 in 
450ms, sequenceid=21, compaction requested=false 2024-11-16T12:46:07,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 74dd56f7af29124799c6b75ebf614235: 2024-11-16T12:46:07,494 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-16T12:46:07,494 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:46:07,495 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/5de943eceee242f2b30864e7e6e8a11a because midkey is the same as first or last row 2024-11-16T12:46:09,248 INFO [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK], DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK]] 2024-11-16T12:46:09,601 INFO [master/0450ab8807f5:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T12:46:09,601 INFO [master/0450ab8807f5:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-16T12:46:11,452 INFO [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK], DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK]] 2024-11-16T12:46:11,455 WARN [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK], DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK]] 2024-11-16T12:46:11,456 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0450ab8807f5%2C33173%2C1731761127376:(num 1731761160007) roll requested 2024-11-16T12:46:11,457 INFO [regionserver/0450ab8807f5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33173%2C1731761127376.1731761171457 2024-11-16T12:46:11,666 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK], DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK]] 2024-11-16T12:46:11,666 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:11,666 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:11,666 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:11,667 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:11,667 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
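Editor's note: the WARN above requests a roll because too many syncs were merely slow (count=8, threshold=5); a little further down, a single sync over 5000 ms triggers the same request (time=5006 ms, threshold=5000 ms). Below is a standalone sketch of those two triggers. The thresholds are copied from the log lines, but the class itself is an illustration, not FSHLog.

    import java.util.ArrayDeque;

    // Two roll triggers visible in the log: a run of moderately slow syncs,
    // or one sync slower than a hard ceiling. Not HBase code.
    public class SlowSyncRollTracker {
        private final long slowSyncMs;          // a sync above this counts as "slow"
        private final long rollOnSingleSyncMs;  // one sync above this forces a roll
        private final int slowCountThreshold;   // this many slow syncs force a roll
        private final ArrayDeque<Long> recentSlowSyncs = new ArrayDeque<>();

        SlowSyncRollTracker(long slowSyncMs, long rollOnSingleSyncMs, int slowCountThreshold) {
            this.slowSyncMs = slowSyncMs;
            this.rollOnSingleSyncMs = rollOnSingleSyncMs;
            this.slowCountThreshold = slowCountThreshold;
        }

        /** Returns true if this sync latency means the WAL should be rolled. */
        boolean recordSync(long costMs) {
            if (costMs >= rollOnSingleSyncMs) {
                return true;                              // "time=5006 ms, threshold=5000 ms"
            }
            if (costMs >= slowSyncMs) {
                recentSlowSyncs.add(costMs);
            }
            return recentSlowSyncs.size() > slowCountThreshold;  // "count=8, threshold=5"
        }

        void rolled() {
            recentSlowSyncs.clear();  // a fresh writer starts with a clean slate
        }

        public static void main(String[] args) {
            SlowSyncRollTracker t = new SlowSyncRollTracker(100, 5000, 5);
            long[] costs = {201, 202, 200, 201, 202, 201, 200, 201};
            for (long c : costs) {
                if (t.recordSync(c)) {
                    System.out.println("roll requested after sync of " + c + " ms");
                    t.rolled();
                }
            }
        }
    }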
2024-11-16T12:46:11,667 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761160007 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761171457 2024-11-16T12:46:11,668 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43667:43667),(127.0.0.1/127.0.0.1:39415:39415)] 2024-11-16T12:46:11,668 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761160007 is not closed yet, will try archiving it next time 2024-11-16T12:46:11,668 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761139829 to hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/oldWALs/0450ab8807f5%2C33173%2C1731761127376.1731761139829 2024-11-16T12:46:11,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741839_1015 (size=7739) 2024-11-16T12:46:11,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741839_1015 (size=7739) 2024-11-16T12:46:13,656 INFO [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK], DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK]] 2024-11-16T12:46:15,244 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 74dd56f7af29124799c6b75ebf614235, had cached 0 bytes from a total of 25018 2024-11-16T12:46:15,863 INFO [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK], DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK]] 2024-11-16T12:46:18,069 INFO [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK], DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK]] 2024-11-16T12:46:20,279 INFO [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK], 
DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK]] 2024-11-16T12:46:22,281 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T12:46:22,282 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33173%2C1731761127376.1731761182281 2024-11-16T12:46:25,761 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T12:46:27,290 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK], DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK]] 2024-11-16T12:46:27,293 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK], DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK]] 2024-11-16T12:46:27,293 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0450ab8807f5%2C33173%2C1731761127376:(num 1731761182281) roll requested 2024-11-16T12:46:27,293 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:27,293 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:27,293 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:27,293 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:27,293 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:27,294 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761171457 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761182281 2024-11-16T12:46:27,295 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43667:43667),(127.0.0.1/127.0.0.1:39415:39415)] 2024-11-16T12:46:27,295 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761171457 is not closed yet, will try archiving it next time 2024-11-16T12:46:27,295 INFO [regionserver/0450ab8807f5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33173%2C1731761127376.1731761187295 2024-11-16T12:46:27,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741841_1017 (size=4753) 2024-11-16T12:46:27,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741841_1017 (size=4753) 2024-11-16T12:46:32,299 INFO [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK], DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK]] 2024-11-16T12:46:32,299 WARN [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK], DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK]] 2024-11-16T12:46:32,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8855): Flush requested on 74dd56f7af29124799c6b75ebf614235 2024-11-16T12:46:32,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 74dd56f7af29124799c6b75ebf614235 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T12:46:32,311 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5012 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK], DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK]] 2024-11-16T12:46:32,311 WARN [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5012 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK], DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK]] 2024-11-16T12:46:34,301 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T12:46:37,303 INFO [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK], DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK]] 2024-11-16T12:46:37,303 WARN [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK], DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK]] 2024-11-16T12:46:37,303 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:37,303 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:37,304 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:37,304 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:37,304 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:37,304 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761182281 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761187295 2024-11-16T12:46:37,306 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43667:43667),(127.0.0.1/127.0.0.1:39415:39415)] 2024-11-16T12:46:37,306 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761182281 is not closed yet, will try archiving it next time 2024-11-16T12:46:37,306 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0450ab8807f5%2C33173%2C1731761127376:(num 1731761187295) roll requested 2024-11-16T12:46:37,307 INFO [regionserver/0450ab8807f5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33173%2C1731761127376.1731761197307 2024-11-16T12:46:37,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741842_1018 (size=1569) 2024-11-16T12:46:37,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741842_1018 (size=1569) 2024-11-16T12:46:37,311 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp/info/0c86867375eb4d209645759d85d82fa4 is 1080, key is row0015/info:/1731761169046/Put/seqid=0 2024-11-16T12:46:37,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741844_1020 (size=12509) 2024-11-16T12:46:37,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741844_1020 (size=12509) 2024-11-16T12:46:37,321 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp/info/0c86867375eb4d209645759d85d82fa4 2024-11-16T12:46:37,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp/info/0c86867375eb4d209645759d85d82fa4 as hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/0c86867375eb4d209645759d85d82fa4 2024-11-16T12:46:37,346 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/0c86867375eb4d209645759d85d82fa4, entries=7, sequenceid=31, filesize=12.2 K 2024-11-16T12:46:42,318 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5008 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK], DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK]] 2024-11-16T12:46:42,318 WARN [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK], DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK]] 2024-11-16T12:46:42,348 INFO [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK], DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK]] 2024-11-16T12:46:42,348 WARN [FSHLog-0-hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922-prefix:0450ab8807f5,33173,1731761127376 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44635,DS-a8e51288-43fd-49c8-b645-bb5e0d84bd08,DISK], DatanodeInfoWithStorage[127.0.0.1:39369,DS-5993f530-0256-445a-b7b9-7b5096d9dafd,DISK]] 2024-11-16T12:46:42,348 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 74dd56f7af29124799c6b75ebf614235 in 10048ms, sequenceid=31, compaction requested=true 2024-11-16T12:46:42,348 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:42,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 74dd56f7af29124799c6b75ebf614235: 2024-11-16T12:46:42,348 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:42,348 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:42,348 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-16T12:46:42,349 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:42,349 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:46:42,349 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:42,349 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/5de943eceee242f2b30864e7e6e8a11a because midkey is the same as first or last row 2024-11-16T12:46:42,349 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761187295 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761197307 2024-11-16T12:46:42,350 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:43667:43667),(127.0.0.1/127.0.0.1:39415:39415)] 2024-11-16T12:46:42,350 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761187295 is not closed yet, will try archiving it next time 2024-11-16T12:46:42,351 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761160007 to hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/oldWALs/0450ab8807f5%2C33173%2C1731761127376.1731761160007 2024-11-16T12:46:42,351 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0450ab8807f5%2C33173%2C1731761127376:(num 1731761202351) roll requested 2024-11-16T12:46:42,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 74dd56f7af29124799c6b75ebf614235:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T12:46:42,351 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33173%2C1731761127376.1731761202351 2024-11-16T12:46:42,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:46:42,355 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761171457 to hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/oldWALs/0450ab8807f5%2C33173%2C1731761127376.1731761171457 2024-11-16T12:46:42,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741843_1019 (size=438) 2024-11-16T12:46:42,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741843_1019 (size=438) 2024-11-16T12:46:42,357 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T12:46:42,358 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761182281 to hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/oldWALs/0450ab8807f5%2C33173%2C1731761127376.1731761182281 2024-11-16T12:46:42,361 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T12:46:42,362 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:42,362 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:42,362 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:42,362 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.HStore(1541): 
74dd56f7af29124799c6b75ebf614235/info is initiating minor compaction (all files) 2024-11-16T12:46:42,362 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:42,362 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:42,363 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761197307 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761202351 2024-11-16T12:46:42,363 INFO [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 74dd56f7af29124799c6b75ebf614235/info in TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235. 2024-11-16T12:46:42,363 INFO [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/5de943eceee242f2b30864e7e6e8a11a, hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/5c409731945a4b31b834446612ae4228, hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/0c86867375eb4d209645759d85d82fa4] into tmpdir=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp, totalSize=36.6 K 2024-11-16T12:46:42,365 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5de943eceee242f2b30864e7e6e8a11a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731761139878 2024-11-16T12:46:42,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741845_1021 (size=93) 2024-11-16T12:46:42,366 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5c409731945a4b31b834446612ae4228, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731761153952 2024-11-16T12:46:42,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741845_1021 (size=93) 2024-11-16T12:46:42,366 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761187295 is not closed yet, will try archiving it next time 2024-11-16T12:46:42,367 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43667:43667),(127.0.0.1/127.0.0.1:39415:39415)] 2024-11-16T12:46:42,367 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761197307 to 
hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/oldWALs/0450ab8807f5%2C33173%2C1731761127376.1731761197307 2024-11-16T12:46:42,367 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761187295 is not closed yet, will try archiving it next time 2024-11-16T12:46:42,367 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0c86867375eb4d209645759d85d82fa4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731761169046 2024-11-16T12:46:42,368 INFO [regionserver/0450ab8807f5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33173%2C1731761127376.1731761202367 2024-11-16T12:46:42,376 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:42,378 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:42,379 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:42,379 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:42,379 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:46:42,379 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761202351 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761202367 2024-11-16T12:46:42,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741846_1022 (size=1258) 2024-11-16T12:46:42,382 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39415:39415),(127.0.0.1/127.0.0.1:43667:43667)] 2024-11-16T12:46:42,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741846_1022 (size=1258) 2024-11-16T12:46:42,382 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761187295 is not closed yet, will try archiving it next time 2024-11-16T12:46:42,382 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761202351 is not closed yet, will try archiving it next time 2024-11-16T12:46:42,383 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761187295 is not closed yet, will try archiving it next time 2024-11-16T12:46:42,406 INFO [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 74dd56f7af29124799c6b75ebf614235#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T12:46:42,407 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp/info/cb4089c016a74e07973cc1dbc2df02d9 is 1080, key is row0001/info:/1731761139878/Put/seqid=0 2024-11-16T12:46:42,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741848_1024 (size=27710) 2024-11-16T12:46:42,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741848_1024 (size=27710) 2024-11-16T12:46:42,431 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp/info/cb4089c016a74e07973cc1dbc2df02d9 as hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/cb4089c016a74e07973cc1dbc2df02d9 2024-11-16T12:46:42,453 INFO [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 74dd56f7af29124799c6b75ebf614235/info of 74dd56f7af29124799c6b75ebf614235 into cb4089c016a74e07973cc1dbc2df02d9(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T12:46:42,453 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 74dd56f7af29124799c6b75ebf614235: 2024-11-16T12:46:42,456 INFO [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235., storeName=74dd56f7af29124799c6b75ebf614235/info, priority=13, startTime=1731761202351; duration=0sec 2024-11-16T12:46:42,456 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T12:46:42,456 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:46:42,456 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/cb4089c016a74e07973cc1dbc2df02d9 because midkey is the same as first or last row 2024-11-16T12:46:42,457 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T12:46:42,457 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:46:42,457 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/cb4089c016a74e07973cc1dbc2df02d9 because midkey is the same as first or last row 2024-11-16T12:46:42,457 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T12:46:42,457 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:46:42,457 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/cb4089c016a74e07973cc1dbc2df02d9 because midkey is the same as first or last row 2024-11-16T12:46:42,457 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:46:42,458 DEBUG [RS:0;0450ab8807f5:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 74dd56f7af29124799c6b75ebf614235:info 2024-11-16T12:46:42,755 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/WALs/0450ab8807f5,33173,1731761127376/0450ab8807f5%2C33173%2C1731761127376.1731761187295 to hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/oldWALs/0450ab8807f5%2C33173%2C1731761127376.1731761187295 2024-11-16T12:46:54,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8855): Flush requested on 74dd56f7af29124799c6b75ebf614235 2024-11-16T12:46:54,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 74dd56f7af29124799c6b75ebf614235 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T12:46:54,407 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp/info/307661ff855843d496c2034fc0153016 is 1080, key is row0022/info:/1731761202369/Put/seqid=0 2024-11-16T12:46:54,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741849_1025 (size=12509) 2024-11-16T12:46:54,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741849_1025 (size=12509) 2024-11-16T12:46:54,414 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp/info/307661ff855843d496c2034fc0153016 2024-11-16T12:46:54,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp/info/307661ff855843d496c2034fc0153016 as hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/307661ff855843d496c2034fc0153016 2024-11-16T12:46:54,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/307661ff855843d496c2034fc0153016, entries=7, sequenceid=42, filesize=12.2 K 2024-11-16T12:46:54,436 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 74dd56f7af29124799c6b75ebf614235 in 37ms, sequenceid=42, compaction requested=false 2024-11-16T12:46:54,436 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 74dd56f7af29124799c6b75ebf614235: 2024-11-16T12:46:54,436 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-16T12:46:54,436 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:46:54,436 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/cb4089c016a74e07973cc1dbc2df02d9 because midkey is the same as first or last row 2024-11-16T12:46:55,761 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T12:47:00,244 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 74dd56f7af29124799c6b75ebf614235, had cached 0 bytes from a total of 40219 2024-11-16T12:47:02,411 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T12:47:02,411 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
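Editor's note: the flusher keeps re-running the same two-step split check seen just above: the region is large enough (sumSize versus sizeToCheck=16.0 K), but no split happens because the candidate file's midkey equals its first or last row. Below is a toy version of that check; StoreFile is a stand-in record, not an HBase class, and the sizes only loosely echo the log.

    import java.util.List;

    // Two-step split check: is the region big enough, and is there a usable
    // split point (midkey different from the first and last rows)?
    public class SplitCheckSketch {
        record StoreFile(long sizeBytes, String firstRow, String midRow, String lastRow) {}

        static boolean shouldSplit(List<StoreFile> files, long sizeToCheckBytes) {
            long sumSize = files.stream().mapToLong(StoreFile::sizeBytes).sum();
            if (sumSize < sizeToCheckBytes) {
                return false;                    // region not big enough yet
            }
            // Pick a split point from the largest file, as the StoreUtils step does.
            StoreFile largest = files.stream()
                .max((a, b) -> Long.compare(a.sizeBytes(), b.sizeBytes())).orElseThrow();
            return !largest.midRow().equals(largest.firstRow())
                && !largest.midRow().equals(largest.lastRow());
            // false here corresponds to "cannot split ... midkey is the same as first or last row"
        }

        public static void main(String[] args) {
            List<StoreFile> files = List.of(
                new StoreFile(27_710, "row0001", "row0001", "row0021"),  // midkey == first row
                new StoreFile(12_509, "row0022", "row0025", "row0028"));
            System.out.println("split? " + shouldSplit(files, 16_384));  // prints false
        }
    }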
2024-11-16T12:47:02,412 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:47:02,418 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:02,418 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:02,418 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
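Editor's note: looking back at the compaction that ran before this shutdown, the policy reported selecting "3 files of size 37527" after exploring candidate permutations with a ratio constraint. Below is a loose sketch of ratio-based selection over contiguous windows of file sizes; the ratio, minimum file count and scoring are simplifications, not the actual ExploringCompactionPolicy.

    import java.util.List;

    // Keep the largest contiguous window in which no single file dwarfs the
    // rest (size <= ratio * sum of the others). Illustrative only.
    public class CompactionSelectionSketch {
        static List<Long> select(List<Long> sizes, double ratio, int minFiles) {
            List<Long> best = List.of();
            for (int start = 0; start < sizes.size(); start++) {
                for (int end = start + minFiles; end <= sizes.size(); end++) {
                    List<Long> window = sizes.subList(start, end);
                    long total = window.stream().mapToLong(Long::longValue).sum();
                    boolean balanced = window.stream()
                        .allMatch(s -> s <= ratio * (total - s));
                    if (balanced && window.size() > best.size()) {
                        best = window;
                    }
                }
            }
            return best;
        }

        public static void main(String[] args) {
            // The three flushed files in the log are 12509 bytes each.
            List<Long> selected = select(List.of(12_509L, 12_509L, 12_509L), 1.2, 3);
            long total = selected.stream().mapToLong(Long::longValue).sum();
            System.out.println(selected.size() + " files, " + total + " bytes");  // 3 files, 37527 bytes
        }
    }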
2024-11-16T12:47:02,419 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T12:47:02,419 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=245640440, stopped=false 2024-11-16T12:47:02,419 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0450ab8807f5,34847,1731761126600 2024-11-16T12:47:02,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T12:47:02,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T12:47:02,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:02,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:02,448 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T12:47:02,448 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T12:47:02,448 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:47:02,448 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:47:02,448 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:47:02,449 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:02,449 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0450ab8807f5,33173,1731761127376' ***** 2024-11-16T12:47:02,449 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T12:47:02,450 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T12:47:02,450 INFO [RS:0;0450ab8807f5:33173 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T12:47:02,450 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T12:47:02,450 INFO [RS:0;0450ab8807f5:33173 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T12:47:02,450 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer(3091): Received CLOSE for 74dd56f7af29124799c6b75ebf614235 2024-11-16T12:47:02,451 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer(959): stopping server 0450ab8807f5,33173,1731761127376 2024-11-16T12:47:02,451 INFO [RS:0;0450ab8807f5:33173 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T12:47:02,451 INFO [RS:0;0450ab8807f5:33173 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0450ab8807f5:33173. 
2024-11-16T12:47:02,451 DEBUG [RS:0;0450ab8807f5:33173 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:47:02,451 DEBUG [RS:0;0450ab8807f5:33173 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:02,451 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 74dd56f7af29124799c6b75ebf614235, disabling compactions & flushes 2024-11-16T12:47:02,452 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T12:47:02,452 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235. 2024-11-16T12:47:02,452 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T12:47:02,452 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235. 2024-11-16T12:47:02,452 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T12:47:02,452 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235. after waiting 0 ms 2024-11-16T12:47:02,452 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235. 
2024-11-16T12:47:02,452 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T12:47:02,452 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 74dd56f7af29124799c6b75ebf614235 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-16T12:47:02,452 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T12:47:02,452 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T12:47:02,452 DEBUG [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer(1325): Online Regions={74dd56f7af29124799c6b75ebf614235=TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235., 1588230740=hbase:meta,,1.1588230740} 2024-11-16T12:47:02,452 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T12:47:02,453 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T12:47:02,453 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T12:47:02,453 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T12:47:02,453 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-16T12:47:02,453 DEBUG [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 74dd56f7af29124799c6b75ebf614235 2024-11-16T12:47:02,463 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp/info/706e0d232da84b698737d09ebc51a034 is 1080, key is row0029/info:/1731761216402/Put/seqid=0 2024-11-16T12:47:02,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741850_1026 (size=8193) 2024-11-16T12:47:02,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741850_1026 (size=8193) 2024-11-16T12:47:02,479 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp/info/706e0d232da84b698737d09ebc51a034 2024-11-16T12:47:02,485 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740/.tmp/info/6bb3b599855c46d59642f7df51b804a6 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235./info:regioninfo/1731761130267/Put/seqid=0 2024-11-16T12:47:02,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741851_1027 (size=7016) 2024-11-16T12:47:02,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741851_1027 (size=7016) 2024-11-16T12:47:02,499 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/.tmp/info/706e0d232da84b698737d09ebc51a034 as hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/706e0d232da84b698737d09ebc51a034 2024-11-16T12:47:02,499 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740/.tmp/info/6bb3b599855c46d59642f7df51b804a6 2024-11-16T12:47:02,517 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/706e0d232da84b698737d09ebc51a034, entries=3, sequenceid=48, filesize=8.0 K 2024-11-16T12:47:02,520 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 74dd56f7af29124799c6b75ebf614235 in 68ms, sequenceid=48, compaction requested=true 2024-11-16T12:47:02,525 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/5de943eceee242f2b30864e7e6e8a11a, hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/5c409731945a4b31b834446612ae4228, hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/0c86867375eb4d209645759d85d82fa4] to archive 2024-11-16T12:47:02,531 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-16T12:47:02,537 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/5de943eceee242f2b30864e7e6e8a11a to hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/archive/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/5de943eceee242f2b30864e7e6e8a11a 2024-11-16T12:47:02,540 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740/.tmp/ns/a897b0693db143ceb2fdf1e3c7a579b9 is 43, key is default/ns:d/1731761129505/Put/seqid=0 2024-11-16T12:47:02,541 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/5c409731945a4b31b834446612ae4228 to hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/archive/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/5c409731945a4b31b834446612ae4228 2024-11-16T12:47:02,544 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/0c86867375eb4d209645759d85d82fa4 to hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/archive/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/info/0c86867375eb4d209645759d85d82fa4 2024-11-16T12:47:02,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741852_1028 (size=5153) 2024-11-16T12:47:02,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741852_1028 (size=5153) 2024-11-16T12:47:02,565 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740/.tmp/ns/a897b0693db143ceb2fdf1e3c7a579b9 2024-11-16T12:47:02,560 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=0450ab8807f5:34847 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-16T12:47:02,566 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [5de943eceee242f2b30864e7e6e8a11a=12509, 5c409731945a4b31b834446612ae4228=12509, 0c86867375eb4d209645759d85d82fa4=12509] 2024-11-16T12:47:02,578 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/default/TestLogRolling-testSlowSyncLogRolling/74dd56f7af29124799c6b75ebf614235/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-16T12:47:02,581 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235. 2024-11-16T12:47:02,581 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 74dd56f7af29124799c6b75ebf614235: Waiting for close lock at 1731761222451Running coprocessor pre-close hooks at 1731761222451Disabling compacts and flushes for region at 1731761222451Disabling writes for close at 1731761222452 (+1 ms)Obtaining lock to block concurrent updates at 1731761222452Preparing flush snapshotting stores in 74dd56f7af29124799c6b75ebf614235 at 1731761222452Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731761222452Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235. at 1731761222454 (+2 ms)Flushing 74dd56f7af29124799c6b75ebf614235/info: creating writer at 1731761222454Flushing 74dd56f7af29124799c6b75ebf614235/info: appending metadata at 1731761222463 (+9 ms)Flushing 74dd56f7af29124799c6b75ebf614235/info: closing flushed file at 1731761222463Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@74a05364: reopening flushed file at 1731761222497 (+34 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 74dd56f7af29124799c6b75ebf614235 in 68ms, sequenceid=48, compaction requested=true at 1731761222520 (+23 ms)Writing region close event to WAL at 1731761222567 (+47 ms)Running coprocessor post-close hooks at 1731761222579 (+12 ms)Closed at 1731761222581 (+2 ms) 2024-11-16T12:47:02,582 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731761129773.74dd56f7af29124799c6b75ebf614235. 
2024-11-16T12:47:02,603 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740/.tmp/table/4c75b81eeae54a39a83d8fbad0d156df is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731761130283/Put/seqid=0 2024-11-16T12:47:02,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741853_1029 (size=5396) 2024-11-16T12:47:02,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741853_1029 (size=5396) 2024-11-16T12:47:02,618 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740/.tmp/table/4c75b81eeae54a39a83d8fbad0d156df 2024-11-16T12:47:02,638 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740/.tmp/info/6bb3b599855c46d59642f7df51b804a6 as hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740/info/6bb3b599855c46d59642f7df51b804a6 2024-11-16T12:47:02,651 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740/info/6bb3b599855c46d59642f7df51b804a6, entries=10, sequenceid=11, filesize=6.9 K 2024-11-16T12:47:02,653 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740/.tmp/ns/a897b0693db143ceb2fdf1e3c7a579b9 as hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740/ns/a897b0693db143ceb2fdf1e3c7a579b9 2024-11-16T12:47:02,653 DEBUG [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T12:47:02,664 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740/ns/a897b0693db143ceb2fdf1e3c7a579b9, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T12:47:02,666 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740/.tmp/table/4c75b81eeae54a39a83d8fbad0d156df as hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740/table/4c75b81eeae54a39a83d8fbad0d156df 2024-11-16T12:47:02,677 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740/table/4c75b81eeae54a39a83d8fbad0d156df, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T12:47:02,679 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 225ms, sequenceid=11, compaction requested=false 2024-11-16T12:47:02,688 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T12:47:02,689 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T12:47:02,689 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T12:47:02,690 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731761222452Running coprocessor pre-close hooks at 1731761222452Disabling compacts and flushes for region at 1731761222452Disabling writes for close at 1731761222453 (+1 ms)Obtaining lock to block concurrent updates at 1731761222453Preparing flush snapshotting stores in 1588230740 at 1731761222453Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731761222453Flushing stores of hbase:meta,,1.1588230740 at 1731761222454 (+1 ms)Flushing 1588230740/info: creating writer at 1731761222454Flushing 1588230740/info: appending metadata at 1731761222484 (+30 ms)Flushing 1588230740/info: closing flushed file at 1731761222484Flushing 1588230740/ns: creating writer at 1731761222513 (+29 ms)Flushing 1588230740/ns: appending metadata at 1731761222539 (+26 ms)Flushing 1588230740/ns: closing flushed file at 1731761222539Flushing 1588230740/table: creating writer at 1731761222577 (+38 ms)Flushing 1588230740/table: appending metadata at 1731761222602 (+25 ms)Flushing 1588230740/table: closing flushed file at 1731761222602Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25ba0f6a: reopening flushed file at 1731761222632 (+30 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6288cda8: reopening flushed file at 1731761222651 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7cd18eb4: reopening flushed file at 1731761222664 (+13 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 225ms, sequenceid=11, compaction requested=false at 1731761222679 (+15 ms)Writing region close event to WAL at 1731761222681 (+2 ms)Running coprocessor post-close hooks at 1731761222689 (+8 ms)Closed at 1731761222689 2024-11-16T12:47:02,690 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T12:47:02,746 INFO [regionserver/0450ab8807f5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T12:47:02,798 INFO [regionserver/0450ab8807f5:0.Chore.1 
{}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T12:47:02,798 INFO [regionserver/0450ab8807f5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T12:47:02,853 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer(976): stopping server 0450ab8807f5,33173,1731761127376; all regions closed. 2024-11-16T12:47:02,855 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:02,855 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:02,855 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:02,855 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:02,855 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:02,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741834_1010 (size=3066) 2024-11-16T12:47:02,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741834_1010 (size=3066) 2024-11-16T12:47:02,861 DEBUG [RS:0;0450ab8807f5:33173 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/oldWALs 2024-11-16T12:47:02,861 INFO [RS:0;0450ab8807f5:33173 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0450ab8807f5%2C33173%2C1731761127376.meta:.meta(num 1731761129317) 2024-11-16T12:47:02,862 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:02,862 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:02,862 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:02,862 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:02,862 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:02,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741847_1023 (size=12695) 2024-11-16T12:47:02,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741847_1023 (size=12695) 2024-11-16T12:47:02,870 DEBUG [RS:0;0450ab8807f5:33173 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/oldWALs 2024-11-16T12:47:02,870 INFO [RS:0;0450ab8807f5:33173 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0450ab8807f5%2C33173%2C1731761127376:(num 1731761202367) 2024-11-16T12:47:02,870 DEBUG [RS:0;0450ab8807f5:33173 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:02,870 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T12:47:02,871 INFO [RS:0;0450ab8807f5:33173 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T12:47:02,871 INFO [RS:0;0450ab8807f5:33173 {}] hbase.ChoreService(370): Chore service for: regionserver/0450ab8807f5:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-16T12:47:02,871 INFO [RS:0;0450ab8807f5:33173 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T12:47:02,871 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T12:47:02,872 INFO [RS:0;0450ab8807f5:33173 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33173 2024-11-16T12:47:02,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0450ab8807f5,33173,1731761127376 2024-11-16T12:47:02,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T12:47:02,889 INFO [RS:0;0450ab8807f5:33173 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T12:47:02,906 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0450ab8807f5,33173,1731761127376] 2024-11-16T12:47:02,922 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0450ab8807f5,33173,1731761127376 already deleted, retry=false 2024-11-16T12:47:02,923 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0450ab8807f5,33173,1731761127376 expired; onlineServers=0 2024-11-16T12:47:02,923 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0450ab8807f5,34847,1731761126600' ***** 2024-11-16T12:47:02,923 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T12:47:02,923 INFO [M:0;0450ab8807f5:34847 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T12:47:02,924 INFO [M:0;0450ab8807f5:34847 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T12:47:02,924 DEBUG [M:0;0450ab8807f5:34847 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T12:47:02,924 DEBUG [M:0;0450ab8807f5:34847 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T12:47:02,924 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T12:47:02,924 DEBUG [master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761128541 {}] cleaner.HFileCleaner(306): Exit Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761128541,5,FailOnTimeoutGroup] 2024-11-16T12:47:02,924 DEBUG [master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761128542 {}] cleaner.HFileCleaner(306): Exit Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761128542,5,FailOnTimeoutGroup] 2024-11-16T12:47:02,925 INFO [M:0;0450ab8807f5:34847 {}] hbase.ChoreService(370): Chore service for: master/0450ab8807f5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T12:47:02,925 INFO [M:0;0450ab8807f5:34847 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T12:47:02,925 DEBUG [M:0;0450ab8807f5:34847 {}] master.HMaster(1795): Stopping service threads 2024-11-16T12:47:02,925 INFO [M:0;0450ab8807f5:34847 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T12:47:02,926 INFO [M:0;0450ab8807f5:34847 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T12:47:02,927 INFO [M:0;0450ab8807f5:34847 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T12:47:02,927 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T12:47:02,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T12:47:02,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:02,939 DEBUG [M:0;0450ab8807f5:34847 {}] zookeeper.ZKUtil(347): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T12:47:02,940 WARN [M:0;0450ab8807f5:34847 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T12:47:02,941 INFO [M:0;0450ab8807f5:34847 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/.lastflushedseqids 2024-11-16T12:47:02,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741854_1030 (size=130) 2024-11-16T12:47:02,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741854_1030 (size=130) 2024-11-16T12:47:02,961 INFO [M:0;0450ab8807f5:34847 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T12:47:02,961 INFO [M:0;0450ab8807f5:34847 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T12:47:02,961 DEBUG [M:0;0450ab8807f5:34847 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T12:47:02,961 INFO [M:0;0450ab8807f5:34847 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:02,961 DEBUG [M:0;0450ab8807f5:34847 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:02,961 DEBUG [M:0;0450ab8807f5:34847 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T12:47:02,961 DEBUG [M:0;0450ab8807f5:34847 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:02,961 INFO [M:0;0450ab8807f5:34847 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-16T12:47:02,982 DEBUG [M:0;0450ab8807f5:34847 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a223dba19acf419cb84c79b04e26b471 is 82, key is hbase:meta,,1/info:regioninfo/1731761129386/Put/seqid=0 2024-11-16T12:47:02,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741855_1031 (size=5672) 2024-11-16T12:47:02,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741855_1031 (size=5672) 2024-11-16T12:47:02,990 INFO [M:0;0450ab8807f5:34847 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a223dba19acf419cb84c79b04e26b471 2024-11-16T12:47:03,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:47:03,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x10144f74c720001, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:47:03,007 INFO [RS:0;0450ab8807f5:33173 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T12:47:03,007 INFO [RS:0;0450ab8807f5:33173 {}] regionserver.HRegionServer(1031): Exiting; stopping=0450ab8807f5,33173,1731761127376; zookeeper connection closed. 
2024-11-16T12:47:03,007 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3e941163 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3e941163 2024-11-16T12:47:03,008 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T12:47:03,017 DEBUG [M:0;0450ab8807f5:34847 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8dbd04f0cd65454f8bf9b7c28306d6fa is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731761130292/Put/seqid=0 2024-11-16T12:47:03,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741856_1032 (size=6247) 2024-11-16T12:47:03,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741856_1032 (size=6247) 2024-11-16T12:47:03,024 INFO [M:0;0450ab8807f5:34847 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8dbd04f0cd65454f8bf9b7c28306d6fa 2024-11-16T12:47:03,031 INFO [M:0;0450ab8807f5:34847 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8dbd04f0cd65454f8bf9b7c28306d6fa 2024-11-16T12:47:03,051 DEBUG [M:0;0450ab8807f5:34847 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6377614cc6eb4a0186155971e44aeef2 is 69, key is 0450ab8807f5,33173,1731761127376/rs:state/1731761128638/Put/seqid=0 2024-11-16T12:47:03,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741857_1033 (size=5156) 2024-11-16T12:47:03,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741857_1033 (size=5156) 2024-11-16T12:47:03,059 INFO [M:0;0450ab8807f5:34847 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6377614cc6eb4a0186155971e44aeef2 2024-11-16T12:47:03,084 DEBUG [M:0;0450ab8807f5:34847 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c57382d4c8f642a98b62b01d3681039d is 52, key is load_balancer_on/state:d/1731761129754/Put/seqid=0 2024-11-16T12:47:03,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741858_1034 (size=5056) 2024-11-16T12:47:03,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741858_1034 (size=5056) 2024-11-16T12:47:03,090 INFO [M:0;0450ab8807f5:34847 {}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c57382d4c8f642a98b62b01d3681039d 2024-11-16T12:47:03,100 DEBUG [M:0;0450ab8807f5:34847 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a223dba19acf419cb84c79b04e26b471 as hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a223dba19acf419cb84c79b04e26b471 2024-11-16T12:47:03,107 INFO [M:0;0450ab8807f5:34847 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a223dba19acf419cb84c79b04e26b471, entries=8, sequenceid=59, filesize=5.5 K 2024-11-16T12:47:03,109 DEBUG [M:0;0450ab8807f5:34847 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8dbd04f0cd65454f8bf9b7c28306d6fa as hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8dbd04f0cd65454f8bf9b7c28306d6fa 2024-11-16T12:47:03,116 INFO [M:0;0450ab8807f5:34847 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8dbd04f0cd65454f8bf9b7c28306d6fa 2024-11-16T12:47:03,116 INFO [M:0;0450ab8807f5:34847 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8dbd04f0cd65454f8bf9b7c28306d6fa, entries=6, sequenceid=59, filesize=6.1 K 2024-11-16T12:47:03,118 DEBUG [M:0;0450ab8807f5:34847 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6377614cc6eb4a0186155971e44aeef2 as hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6377614cc6eb4a0186155971e44aeef2 2024-11-16T12:47:03,126 INFO [M:0;0450ab8807f5:34847 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6377614cc6eb4a0186155971e44aeef2, entries=1, sequenceid=59, filesize=5.0 K 2024-11-16T12:47:03,127 DEBUG [M:0;0450ab8807f5:34847 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c57382d4c8f642a98b62b01d3681039d as hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c57382d4c8f642a98b62b01d3681039d 2024-11-16T12:47:03,134 INFO [M:0;0450ab8807f5:34847 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c57382d4c8f642a98b62b01d3681039d, entries=1, sequenceid=59, filesize=4.9 K 2024-11-16T12:47:03,136 INFO [M:0;0450ab8807f5:34847 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 175ms, sequenceid=59, compaction requested=false 2024-11-16T12:47:03,139 INFO [M:0;0450ab8807f5:34847 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:03,139 DEBUG [M:0;0450ab8807f5:34847 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731761222961Disabling compacts and flushes for region at 1731761222961Disabling writes for close at 1731761222961Obtaining lock to block concurrent updates at 1731761222962 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731761222962Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731761222962Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731761222963 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731761222963Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731761222981 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731761222981Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731761222998 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731761223016 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731761223016Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731761223031 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731761223050 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731761223051 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731761223068 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731761223083 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731761223083Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76ebf01d: reopening flushed file at 1731761223098 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16d6c91f: reopening flushed file at 1731761223108 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77af3ef0: reopening flushed file at 1731761223116 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@661c5613: reopening flushed file at 1731761223126 (+10 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 175ms, sequenceid=59, compaction requested=false at 1731761223136 (+10 ms)Writing region close event to WAL at 1731761223139 (+3 ms)Closed at 1731761223139 2024-11-16T12:47:03,140 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:03,140 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:03,140 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:03,141 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-16T12:47:03,141 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:03,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44635 is added to blk_1073741830_1006 (size=27973) 2024-11-16T12:47:03,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39369 is added to blk_1073741830_1006 (size=27973) 2024-11-16T12:47:03,144 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T12:47:03,144 INFO [M:0;0450ab8807f5:34847 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T12:47:03,145 INFO [M:0;0450ab8807f5:34847 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34847 2024-11-16T12:47:03,145 INFO [M:0;0450ab8807f5:34847 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T12:47:03,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:47:03,261 INFO [M:0;0450ab8807f5:34847 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T12:47:03,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34847-0x10144f74c720000, quorum=127.0.0.1:57386, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:47:03,266 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:03,269 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:47:03,269 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:47:03,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:47:03,270 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/hadoop.log.dir/,STOPPED} 2024-11-16T12:47:03,273 WARN [BP-335813976-172.17.0.2-1731761123019 heartbeating to localhost/127.0.0.1:36417 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:47:03,273 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T12:47:03,273 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:47:03,273 WARN [BP-335813976-172.17.0.2-1731761123019 heartbeating to localhost/127.0.0.1:36417 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-335813976-172.17.0.2-1731761123019 (Datanode Uuid b1151d9b-99ed-41d9-bf9c-a2d4affbbce3) service to localhost/127.0.0.1:36417 2024-11-16T12:47:03,274 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/cluster_b35c52b3-6cb1-7a8b-5887-65afa3b95417/data/data3/current/BP-335813976-172.17.0.2-1731761123019 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:03,274 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/cluster_b35c52b3-6cb1-7a8b-5887-65afa3b95417/data/data4/current/BP-335813976-172.17.0.2-1731761123019 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:03,275 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:47:03,281 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:03,282 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:47:03,282 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:47:03,282 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:47:03,282 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/hadoop.log.dir/,STOPPED} 2024-11-16T12:47:03,283 WARN [BP-335813976-172.17.0.2-1731761123019 heartbeating to localhost/127.0.0.1:36417 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:47:03,283 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T12:47:03,283 WARN [BP-335813976-172.17.0.2-1731761123019 heartbeating to localhost/127.0.0.1:36417 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-335813976-172.17.0.2-1731761123019 (Datanode Uuid ae7f4d77-29d3-4e05-9545-1d08a753d915) service to localhost/127.0.0.1:36417 2024-11-16T12:47:03,283 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:47:03,284 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/cluster_b35c52b3-6cb1-7a8b-5887-65afa3b95417/data/data1/current/BP-335813976-172.17.0.2-1731761123019 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:03,284 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/cluster_b35c52b3-6cb1-7a8b-5887-65afa3b95417/data/data2/current/BP-335813976-172.17.0.2-1731761123019 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:03,285 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:47:03,294 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T12:47:03,295 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:47:03,295 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:47:03,295 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:47:03,295 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/hadoop.log.dir/,STOPPED} 2024-11-16T12:47:03,305 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T12:47:03,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T12:47:03,360 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=80 (was 12) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@1ab9ef81 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36417 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/0450ab8807f5:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36417 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
LeaseRenewer:jenkins.hfs.0@localhost:36417 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36417 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36417 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36417 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36417 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36417 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native 
Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: master/0450ab8807f5:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/0450ab8807f5:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=403 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=161 (was 266), ProcessCount=11 (was 11), AvailableMemoryMB=4399 (was 4324) - AvailableMemoryMB LEAK? 
- 2024-11-16T12:47:03,367 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=81, OpenFileDescriptor=403, MaxFileDescriptor=1048576, SystemLoadAverage=161, ProcessCount=11, AvailableMemoryMB=4398 2024-11-16T12:47:03,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T12:47:03,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/hadoop.log.dir so I do NOT create it in target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4 2024-11-16T12:47:03,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/edd6923c-0677-90e0-cc1c-9bbca94963aa/hadoop.tmp.dir so I do NOT create it in target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4 2024-11-16T12:47:03,368 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/cluster_a8c24dd2-409a-e7eb-c8dd-a3e580eeb88f, deleteOnExit=true 2024-11-16T12:47:03,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T12:47:03,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/test.cache.data in system properties and HBase conf 2024-11-16T12:47:03,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T12:47:03,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/hadoop.log.dir in system properties and HBase conf 2024-11-16T12:47:03,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T12:47:03,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T12:47:03,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T12:47:03,368 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T12:47:03,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T12:47:03,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T12:47:03,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T12:47:03,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T12:47:03,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T12:47:03,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T12:47:03,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T12:47:03,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T12:47:03,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T12:47:03,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/nfs.dump.dir in system properties and HBase conf 2024-11-16T12:47:03,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/java.io.tmpdir in system properties and HBase conf 2024-11-16T12:47:03,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T12:47:03,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T12:47:03,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T12:47:03,388 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T12:47:03,646 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:47:03,653 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:47:03,661 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:47:03,661 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:47:03,661 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T12:47:03,666 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:47:03,667 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15ba4d19{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:47:03,667 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78fa6004{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:47:03,787 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@194f043a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/java.io.tmpdir/jetty-localhost-41003-hadoop-hdfs-3_4_1-tests_jar-_-any-845385118983409958/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T12:47:03,788 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4d974b8f{HTTP/1.1, (http/1.1)}{localhost:41003} 2024-11-16T12:47:03,788 INFO [Time-limited test {}] server.Server(415): Started @102675ms 2024-11-16T12:47:03,801 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T12:47:04,016 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:47:04,020 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:47:04,021 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:47:04,021 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:47:04,021 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T12:47:04,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d119060{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:47:04,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f2378c9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:47:04,131 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@101cd95b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/java.io.tmpdir/jetty-localhost-41697-hadoop-hdfs-3_4_1-tests_jar-_-any-4226896660440903247/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:04,132 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@260e0b79{HTTP/1.1, (http/1.1)}{localhost:41697} 2024-11-16T12:47:04,132 INFO [Time-limited test {}] server.Server(415): Started @103019ms 2024-11-16T12:47:04,133 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:47:04,167 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:47:04,171 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:47:04,172 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:47:04,172 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:47:04,172 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T12:47:04,173 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ff5ef6c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:47:04,173 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2066f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:47:04,285 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2fa3cb60{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/java.io.tmpdir/jetty-localhost-34835-hadoop-hdfs-3_4_1-tests_jar-_-any-1368848264730793458/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:04,285 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@18662151{HTTP/1.1, (http/1.1)}{localhost:34835} 2024-11-16T12:47:04,285 INFO [Time-limited test {}] server.Server(415): Started @103172ms 2024-11-16T12:47:04,288 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:47:05,018 WARN [Thread-452 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/cluster_a8c24dd2-409a-e7eb-c8dd-a3e580eeb88f/data/data1/current/BP-45895880-172.17.0.2-1731761223400/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:05,019 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/cluster_a8c24dd2-409a-e7eb-c8dd-a3e580eeb88f/data/data2/current/BP-45895880-172.17.0.2-1731761223400/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:05,041 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T12:47:05,044 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1bb2991de263a90 with lease ID 0xc0f58b18d6a34d13: Processing first storage report for DS-0eb12472-e0ce-4abf-b180-46c06738fcf0 from datanode DatanodeRegistration(127.0.0.1:42239, datanodeUuid=db346079-5df1-40f8-a291-e2008441fe1b, infoPort=36247, infoSecurePort=0, ipcPort=46293, storageInfo=lv=-57;cid=testClusterID;nsid=1590667436;c=1731761223400) 2024-11-16T12:47:05,044 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1bb2991de263a90 with lease ID 0xc0f58b18d6a34d13: from storage DS-0eb12472-e0ce-4abf-b180-46c06738fcf0 node DatanodeRegistration(127.0.0.1:42239, datanodeUuid=db346079-5df1-40f8-a291-e2008441fe1b, infoPort=36247, infoSecurePort=0, ipcPort=46293, storageInfo=lv=-57;cid=testClusterID;nsid=1590667436;c=1731761223400), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:05,044 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1bb2991de263a90 with lease ID 0xc0f58b18d6a34d13: Processing first storage report for DS-acfc1dfd-ee3c-4b9a-a5b4-ddae681e7552 from datanode DatanodeRegistration(127.0.0.1:42239, datanodeUuid=db346079-5df1-40f8-a291-e2008441fe1b, infoPort=36247, infoSecurePort=0, ipcPort=46293, storageInfo=lv=-57;cid=testClusterID;nsid=1590667436;c=1731761223400) 2024-11-16T12:47:05,044 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1bb2991de263a90 with lease ID 0xc0f58b18d6a34d13: from storage DS-acfc1dfd-ee3c-4b9a-a5b4-ddae681e7552 node DatanodeRegistration(127.0.0.1:42239, datanodeUuid=db346079-5df1-40f8-a291-e2008441fe1b, infoPort=36247, infoSecurePort=0, ipcPort=46293, storageInfo=lv=-57;cid=testClusterID;nsid=1590667436;c=1731761223400), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:05,181 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/cluster_a8c24dd2-409a-e7eb-c8dd-a3e580eeb88f/data/data3/current/BP-45895880-172.17.0.2-1731761223400/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:05,181 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/cluster_a8c24dd2-409a-e7eb-c8dd-a3e580eeb88f/data/data4/current/BP-45895880-172.17.0.2-1731761223400/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:05,204 WARN [Thread-439 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T12:47:05,207 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe926d96e6c4ae2f6 with lease ID 0xc0f58b18d6a34d14: Processing first storage report for DS-30c03b72-6179-4c62-8dbb-36a6f921676b from datanode DatanodeRegistration(127.0.0.1:43477, datanodeUuid=bc9fe41a-6701-4714-95b0-aeb1aa0b868b, infoPort=43139, infoSecurePort=0, ipcPort=39079, storageInfo=lv=-57;cid=testClusterID;nsid=1590667436;c=1731761223400) 2024-11-16T12:47:05,207 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe926d96e6c4ae2f6 with lease ID 0xc0f58b18d6a34d14: from storage DS-30c03b72-6179-4c62-8dbb-36a6f921676b node DatanodeRegistration(127.0.0.1:43477, datanodeUuid=bc9fe41a-6701-4714-95b0-aeb1aa0b868b, infoPort=43139, infoSecurePort=0, ipcPort=39079, storageInfo=lv=-57;cid=testClusterID;nsid=1590667436;c=1731761223400), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:05,207 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe926d96e6c4ae2f6 with lease ID 0xc0f58b18d6a34d14: Processing first storage report for DS-485b3b1c-a98c-408f-9b59-a3e792e444f5 from datanode DatanodeRegistration(127.0.0.1:43477, datanodeUuid=bc9fe41a-6701-4714-95b0-aeb1aa0b868b, infoPort=43139, infoSecurePort=0, ipcPort=39079, storageInfo=lv=-57;cid=testClusterID;nsid=1590667436;c=1731761223400) 2024-11-16T12:47:05,207 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe926d96e6c4ae2f6 with lease ID 0xc0f58b18d6a34d14: from storage DS-485b3b1c-a98c-408f-9b59-a3e792e444f5 node DatanodeRegistration(127.0.0.1:43477, datanodeUuid=bc9fe41a-6701-4714-95b0-aeb1aa0b868b, infoPort=43139, infoSecurePort=0, ipcPort=39079, storageInfo=lv=-57;cid=testClusterID;nsid=1590667436;c=1731761223400), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:05,226 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4 2024-11-16T12:47:05,229 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/cluster_a8c24dd2-409a-e7eb-c8dd-a3e580eeb88f/zookeeper_0, clientPort=55508, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/cluster_a8c24dd2-409a-e7eb-c8dd-a3e580eeb88f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/cluster_a8c24dd2-409a-e7eb-c8dd-a3e580eeb88f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T12:47:05,231 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55508 2024-11-16T12:47:05,231 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:05,232 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:05,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43477 is added to blk_1073741825_1001 (size=7) 2024-11-16T12:47:05,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741825_1001 (size=7) 2024-11-16T12:47:05,245 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2 with version=8 2024-11-16T12:47:05,246 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/hbase-staging 2024-11-16T12:47:05,248 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0450ab8807f5:0 server-side Connection retries=45 2024-11-16T12:47:05,248 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:05,249 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:05,249 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T12:47:05,249 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:05,249 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T12:47:05,249 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T12:47:05,249 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T12:47:05,250 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39837 2024-11-16T12:47:05,252 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39837 connecting to ZooKeeper ensemble=127.0.0.1:55508 2024-11-16T12:47:05,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:398370x0, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T12:47:05,296 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39837-0x10144f8d0d10000 connected 2024-11-16T12:47:05,384 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:05,386 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:05,389 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:47:05,389 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2, hbase.cluster.distributed=false 2024-11-16T12:47:05,391 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T12:47:05,392 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39837 2024-11-16T12:47:05,393 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39837 2024-11-16T12:47:05,393 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39837 2024-11-16T12:47:05,394 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39837 2024-11-16T12:47:05,394 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39837 2024-11-16T12:47:05,412 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0450ab8807f5:0 server-side Connection retries=45 2024-11-16T12:47:05,412 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:05,413 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:05,413 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T12:47:05,413 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:05,413 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T12:47:05,413 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T12:47:05,413 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T12:47:05,414 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41605 2024-11-16T12:47:05,415 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41605 connecting to ZooKeeper ensemble=127.0.0.1:55508 2024-11-16T12:47:05,416 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:05,418 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:05,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:416050x0, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T12:47:05,434 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:416050x0, quorum=127.0.0.1:55508, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:47:05,435 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41605-0x10144f8d0d10001 connected 2024-11-16T12:47:05,435 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T12:47:05,437 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T12:47:05,438 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T12:47:05,439 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T12:47:05,441 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41605 2024-11-16T12:47:05,441 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41605 2024-11-16T12:47:05,442 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41605 2024-11-16T12:47:05,444 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41605 2024-11-16T12:47:05,446 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41605 2024-11-16T12:47:05,464 DEBUG [M:0;0450ab8807f5:39837 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0450ab8807f5:39837 2024-11-16T12:47:05,466 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0450ab8807f5,39837,1731761225248 2024-11-16T12:47:05,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:47:05,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:47:05,476 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/0450ab8807f5,39837,1731761225248 2024-11-16T12:47:05,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:05,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T12:47:05,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:05,487 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T12:47:05,488 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0450ab8807f5,39837,1731761225248 from backup master directory 2024-11-16T12:47:05,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0450ab8807f5,39837,1731761225248 2024-11-16T12:47:05,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:47:05,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:47:05,498 WARN [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
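The startup sequence logged above — DFS and Jetty endpoints, the mini ZooKeeper cluster, the master and regionserver RPC servers, and the master-election znodes under /hbase/backup-masters — is what HBaseTestingUtil drives when a test asks for a minicluster. As a rough sketch only, with the builder values mirroring the StartMiniClusterOption printed at the top of this run (1 master, 1 region server, 2 datanodes, 1 ZK server); the class name MiniClusterSketch and the main() harness are illustrative and not part of the actual test:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Topology as logged by HBaseTestingUtil(805) above.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);
        try {
          // ... exercise the cluster here, e.g. roll WALs and stop a datanode ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }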
2024-11-16T12:47:05,498 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0450ab8807f5,39837,1731761225248 2024-11-16T12:47:05,504 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/hbase.id] with ID: d8f990be-238f-46ef-b1a6-6a0d50302113 2024-11-16T12:47:05,504 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/.tmp/hbase.id 2024-11-16T12:47:05,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43477 is added to blk_1073741826_1002 (size=42) 2024-11-16T12:47:05,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741826_1002 (size=42) 2024-11-16T12:47:05,511 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/.tmp/hbase.id]:[hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/hbase.id] 2024-11-16T12:47:05,529 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:05,529 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T12:47:05,532 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 
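The FSUtils(620/625/634) entries above describe the cluster ID being written to .tmp/hbase.id and then moved to its final location, so a reader of hbase.id never observes a partially written file. A minimal sketch of that write-then-rename idiom on the Hadoop FileSystem API (the helper writeClusterId is hypothetical, not the actual FSUtils code):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      // Write the ID under .tmp first, then atomically rename it into place.
      static void writeClusterId(FileSystem fs, Path rootDir, String clusterId) throws IOException {
        Path tmp = new Path(new Path(rootDir, ".tmp"), "hbase.id");
        Path target = new Path(rootDir, "hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, target)) {
          throw new IOException("rename " + tmp + " -> " + target + " failed");
        }
      }
    }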
2024-11-16T12:47:05,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:05,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:05,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43477 is added to blk_1073741827_1003 (size=196) 2024-11-16T12:47:05,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741827_1003 (size=196) 2024-11-16T12:47:05,560 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T12:47:05,561 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T12:47:05,561 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:47:05,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741828_1004 (size=1189) 2024-11-16T12:47:05,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43477 is added to blk_1073741828_1004 (size=1189) 2024-11-16T12:47:05,582 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store 2024-11-16T12:47:05,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43477 is added to blk_1073741829_1005 (size=34) 2024-11-16T12:47:05,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741829_1005 (size=34) 2024-11-16T12:47:05,591 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:47:05,591 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T12:47:05,591 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:05,591 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:05,591 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T12:47:05,591 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:05,591 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
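The MasterRegion(370) and HRegion(7590) entries above print the full descriptor of the local 'master:store' table. For reference, two of those column families ('info' and 'proc') could be reconstructed with the public descriptor builders roughly as follows; this is a sketch using values copied from the log lines, and MasterStoreDescriptorSketch is an illustrative name, not HBase code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      // Values taken from the 'info' and 'proc' family attributes logged above.
      static TableDescriptor masterStoreLike() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)
                .build())
            .build();
      }
    }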
2024-11-16T12:47:05,592 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731761225591Disabling compacts and flushes for region at 1731761225591Disabling writes for close at 1731761225591Writing region close event to WAL at 1731761225591Closed at 1731761225591 2024-11-16T12:47:05,593 WARN [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/.initializing 2024-11-16T12:47:05,593 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/WALs/0450ab8807f5,39837,1731761225248 2024-11-16T12:47:05,597 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C39837%2C1731761225248, suffix=, logDir=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/WALs/0450ab8807f5,39837,1731761225248, archiveDir=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/oldWALs, maxLogs=10 2024-11-16T12:47:05,598 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C39837%2C1731761225248.1731761225597 2024-11-16T12:47:05,628 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/WALs/0450ab8807f5,39837,1731761225248/0450ab8807f5%2C39837%2C1731761225248.1731761225597 2024-11-16T12:47:05,632 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36247:36247),(127.0.0.1/127.0.0.1:43139:43139)] 2024-11-16T12:47:05,635 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:47:05,635 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:47:05,636 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:05,636 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:05,639 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:05,641 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T12:47:05,642 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:05,642 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:05,643 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:05,645 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T12:47:05,645 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:05,646 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:47:05,646 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:05,651 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T12:47:05,651 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:05,654 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:47:05,654 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:05,657 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T12:47:05,657 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:05,658 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:47:05,658 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:05,661 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:05,662 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:05,664 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:05,664 DEBUG [master/0450ab8807f5:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:05,664 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T12:47:05,666 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:05,668 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:47:05,669 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=768454, jitterRate=-0.022860541939735413}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T12:47:05,670 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731761225636Initializing all the Stores at 1731761225637 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761225638 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761225638Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761225638Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761225638Cleaning up temporary data from old regions at 1731761225664 (+26 ms)Region opened successfully at 1731761225670 (+6 ms) 2024-11-16T12:47:05,678 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T12:47:05,684 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@dc6eafb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0450ab8807f5/172.17.0.2:0 2024-11-16T12:47:05,685 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T12:47:05,685 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T12:47:05,686 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T12:47:05,686 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T12:47:05,687 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T12:47:05,687 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T12:47:05,688 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T12:47:05,691 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T12:47:05,692 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T12:47:05,706 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T12:47:05,707 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T12:47:05,708 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T12:47:05,717 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T12:47:05,718 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T12:47:05,719 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T12:47:05,725 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T12:47:05,727 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T12:47:05,734 DEBUG 
[master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T12:47:05,736 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T12:47:05,742 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T12:47:05,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T12:47:05,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T12:47:05,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:05,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:05,751 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0450ab8807f5,39837,1731761225248, sessionid=0x10144f8d0d10000, setting cluster-up flag (Was=false) 2024-11-16T12:47:05,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:05,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:05,792 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T12:47:05,794 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0450ab8807f5,39837,1731761225248 2024-11-16T12:47:05,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:05,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:05,842 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T12:47:05,845 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0450ab8807f5,39837,1731761225248 2024-11-16T12:47:05,847 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T12:47:05,850 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T12:47:05,850 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T12:47:05,851 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T12:47:05,851 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0450ab8807f5,39837,1731761225248 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T12:47:05,852 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.HRegionServer(746): ClusterId : d8f990be-238f-46ef-b1a6-6a0d50302113 2024-11-16T12:47:05,852 DEBUG [RS:0;0450ab8807f5:41605 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T12:47:05,853 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:47:05,853 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:47:05,853 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:47:05,853 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:47:05,853 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0450ab8807f5:0, corePoolSize=10, maxPoolSize=10 2024-11-16T12:47:05,853 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:05,853 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/0450ab8807f5:0, corePoolSize=2, maxPoolSize=2 2024-11-16T12:47:05,854 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:05,854 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731761255854 2024-11-16T12:47:05,855 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T12:47:05,855 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T12:47:05,855 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T12:47:05,855 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T12:47:05,855 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T12:47:05,855 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T12:47:05,855 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:05,856 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:47:05,856 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T12:47:05,856 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T12:47:05,856 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T12:47:05,856 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T12:47:05,856 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T12:47:05,857 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T12:47:05,857 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761225857,5,FailOnTimeoutGroup] 2024-11-16T12:47:05,857 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761225857,5,FailOnTimeoutGroup] 2024-11-16T12:47:05,857 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, 
period=600000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:05,857 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T12:47:05,857 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:05,857 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:05,857 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:05,857 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T12:47:05,860 DEBUG [RS:0;0450ab8807f5:41605 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T12:47:05,860 DEBUG [RS:0;0450ab8807f5:41605 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T12:47:05,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43477 is added to blk_1073741831_1007 (size=1321) 2024-11-16T12:47:05,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741831_1007 (size=1321) 2024-11-16T12:47:05,868 DEBUG [RS:0;0450ab8807f5:41605 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T12:47:05,869 DEBUG [RS:0;0450ab8807f5:41605 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5610c897, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0450ab8807f5/172.17.0.2:0 2024-11-16T12:47:05,870 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T12:47:05,870 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2 2024-11-16T12:47:05,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741832_1008 (size=32) 2024-11-16T12:47:05,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43477 is added to blk_1073741832_1008 (size=32) 2024-11-16T12:47:05,881 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:47:05,882 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T12:47:05,884 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered 
window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T12:47:05,884 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:05,885 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:05,885 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T12:47:05,886 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T12:47:05,886 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:05,887 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:05,887 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T12:47:05,889 DEBUG [RS:0;0450ab8807f5:41605 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0450ab8807f5:41605 2024-11-16T12:47:05,889 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 
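The hbase:meta table descriptor logged above (ROW_INDEX_V1 data block encoding, ROWCOL bloom filters, in-memory column families, 8 KB block size) maps directly onto the HBase 2.x client builder API. The following is a minimal sketch, not part of this test run; the table name 'demo' and the default client configuration are assumptions made only for illustration.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateDemoTable {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Column family mirroring the settings logged for hbase:meta's 'info' family:
          // ROW_INDEX_V1 block encoding, ROWCOL bloom filter, in-memory, 8 KB blocks.
          TableDescriptorBuilder table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("demo"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("info"))
                  .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                  .setBloomFilterType(BloomType.ROWCOL)
                  .setInMemory(true)
                  .setBlocksize(8192)
                  .build());
          admin.createTable(table.build());
        }
      }
    }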
2024-11-16T12:47:05,889 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:05,889 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T12:47:05,889 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T12:47:05,889 DEBUG [RS:0;0450ab8807f5:41605 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T12:47:05,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:05,890 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T12:47:05,890 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.HRegionServer(2659): reportForDuty to master=0450ab8807f5,39837,1731761225248 with port=41605, startcode=1731761225412 2024-11-16T12:47:05,890 DEBUG [RS:0;0450ab8807f5:41605 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T12:47:05,891 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T12:47:05,892 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:05,892 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:05,892 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T12:47:05,893 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/data/hbase/meta/1588230740 2024-11-16T12:47:05,894 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/data/hbase/meta/1588230740 2024-11-16T12:47:05,894 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37933, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), 
service=RegionServerStatusService 2024-11-16T12:47:05,895 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39837 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0450ab8807f5,41605,1731761225412 2024-11-16T12:47:05,895 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39837 {}] master.ServerManager(517): Registering regionserver=0450ab8807f5,41605,1731761225412 2024-11-16T12:47:05,896 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T12:47:05,896 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T12:47:05,896 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T12:47:05,897 DEBUG [RS:0;0450ab8807f5:41605 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2 2024-11-16T12:47:05,897 DEBUG [RS:0;0450ab8807f5:41605 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:32873 2024-11-16T12:47:05,898 DEBUG [RS:0;0450ab8807f5:41605 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T12:47:05,898 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T12:47:05,900 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:47:05,901 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=834555, jitterRate=0.06119281053543091}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T12:47:05,903 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731761225881Initializing all the Stores at 1731761225882 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761225882Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761225882Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761225882Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761225882Cleaning up temporary data from old regions at 1731761225896 (+14 ms)Region opened successfully at 1731761225903 (+7 ms) 2024-11-16T12:47:05,903 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T12:47:05,903 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T12:47:05,903 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T12:47:05,903 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T12:47:05,903 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T12:47:05,903 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T12:47:05,904 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731761225903Disabling compacts and flushes for region at 1731761225903Disabling writes for close at 1731761225903Writing region close event to WAL at 1731761225903Closed at 1731761225903 2024-11-16T12:47:05,905 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:47:05,905 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T12:47:05,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T12:47:05,907 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T12:47:05,909 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T12:47:05,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T12:47:05,910 DEBUG [RS:0;0450ab8807f5:41605 {}] zookeeper.ZKUtil(111): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0450ab8807f5,41605,1731761225412 2024-11-16T12:47:05,910 WARN [RS:0;0450ab8807f5:41605 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
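The coordination visible here runs through znodes under the /hbase base znode: ephemeral entries under /hbase/rs for live region servers (the node created for 0450ab8807f5,41605,1731761225412 above) and, once meta is assigned further down, /hbase/meta-region-server. A hedged sketch with the plain ZooKeeper client follows; the quorum address 127.0.0.1:55508 is taken from the log, while the session timeout and the no-op watcher are arbitrary choices.

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class ListHBaseZNodes {
      public static void main(String[] args) throws Exception {
        // Quorum address taken from the log above; the 30s session timeout is arbitrary.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55508", 30_000, (WatchedEvent e) -> { });
        try {
          // Ephemeral znodes for live region servers, e.g. /hbase/rs/0450ab8807f5,41605,...
          List<String> regionServers = zk.getChildren("/hbase/rs", false);
          System.out.println("live region servers: " + regionServers);
          // Present once the meta region has been assigned (see the assignment below).
          System.out.println("meta znode exists: "
              + (zk.exists("/hbase/meta-region-server", false) != null));
        } finally {
          zk.close();
        }
      }
    }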
2024-11-16T12:47:05,910 INFO [RS:0;0450ab8807f5:41605 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:47:05,910 DEBUG [RS:0;0450ab8807f5:41605 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/WALs/0450ab8807f5,41605,1731761225412 2024-11-16T12:47:05,910 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0450ab8807f5,41605,1731761225412] 2024-11-16T12:47:05,917 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T12:47:05,921 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T12:47:05,921 INFO [RS:0;0450ab8807f5:41605 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T12:47:05,921 INFO [RS:0;0450ab8807f5:41605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:05,922 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T12:47:05,923 INFO [RS:0;0450ab8807f5:41605 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T12:47:05,923 INFO [RS:0;0450ab8807f5:41605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:05,923 DEBUG [RS:0;0450ab8807f5:41605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:05,923 DEBUG [RS:0;0450ab8807f5:41605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:05,923 DEBUG [RS:0;0450ab8807f5:41605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:05,923 DEBUG [RS:0;0450ab8807f5:41605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:05,923 DEBUG [RS:0;0450ab8807f5:41605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:05,923 DEBUG [RS:0;0450ab8807f5:41605 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0450ab8807f5:0, corePoolSize=2, maxPoolSize=2 2024-11-16T12:47:05,924 DEBUG [RS:0;0450ab8807f5:41605 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:05,924 DEBUG [RS:0;0450ab8807f5:41605 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:05,924 DEBUG [RS:0;0450ab8807f5:41605 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0450ab8807f5:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T12:47:05,924 DEBUG [RS:0;0450ab8807f5:41605 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:05,924 DEBUG [RS:0;0450ab8807f5:41605 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:05,924 DEBUG [RS:0;0450ab8807f5:41605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:05,924 DEBUG [RS:0;0450ab8807f5:41605 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0450ab8807f5:0, corePoolSize=3, maxPoolSize=3 2024-11-16T12:47:05,924 DEBUG [RS:0;0450ab8807f5:41605 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0, corePoolSize=3, maxPoolSize=3 2024-11-16T12:47:05,925 INFO [RS:0;0450ab8807f5:41605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:05,925 INFO [RS:0;0450ab8807f5:41605 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:05,925 INFO [RS:0;0450ab8807f5:41605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:05,925 INFO [RS:0;0450ab8807f5:41605 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:05,925 INFO [RS:0;0450ab8807f5:41605 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:05,925 INFO [RS:0;0450ab8807f5:41605 {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,41605,1731761225412-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T12:47:05,940 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T12:47:05,940 INFO [RS:0;0450ab8807f5:41605 {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,41605,1731761225412-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:05,940 INFO [RS:0;0450ab8807f5:41605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:05,940 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.Replication(171): 0450ab8807f5,41605,1731761225412 started 2024-11-16T12:47:05,955 INFO [RS:0;0450ab8807f5:41605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T12:47:05,955 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.HRegionServer(1482): Serving as 0450ab8807f5,41605,1731761225412, RpcServer on 0450ab8807f5/172.17.0.2:41605, sessionid=0x10144f8d0d10001 2024-11-16T12:47:05,955 DEBUG [RS:0;0450ab8807f5:41605 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T12:47:05,955 DEBUG [RS:0;0450ab8807f5:41605 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0450ab8807f5,41605,1731761225412 2024-11-16T12:47:05,955 DEBUG [RS:0;0450ab8807f5:41605 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0450ab8807f5,41605,1731761225412' 2024-11-16T12:47:05,955 DEBUG [RS:0;0450ab8807f5:41605 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T12:47:05,956 DEBUG [RS:0;0450ab8807f5:41605 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T12:47:05,957 DEBUG [RS:0;0450ab8807f5:41605 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T12:47:05,957 DEBUG [RS:0;0450ab8807f5:41605 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T12:47:05,957 DEBUG [RS:0;0450ab8807f5:41605 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0450ab8807f5,41605,1731761225412 2024-11-16T12:47:05,957 DEBUG [RS:0;0450ab8807f5:41605 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0450ab8807f5,41605,1731761225412' 2024-11-16T12:47:05,957 DEBUG [RS:0;0450ab8807f5:41605 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T12:47:05,957 DEBUG [RS:0;0450ab8807f5:41605 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T12:47:05,958 DEBUG [RS:0;0450ab8807f5:41605 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T12:47:05,958 INFO [RS:0;0450ab8807f5:41605 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T12:47:05,958 INFO [RS:0;0450ab8807f5:41605 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T12:47:06,059 WARN [0450ab8807f5:39837 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
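Both quota managers report "Quota support disabled" because quota support stays off unless hbase.quota.enabled is set to true on the cluster. The sketch below is illustrative only: it assumes quotas have been enabled server-side and uses a hypothetical 'demo' table to show the client-side throttle call.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
    import org.apache.hadoop.hbase.quotas.ThrottleType;

    public class TableThrottleExample {
      public static void main(String[] args) throws Exception {
        // Assumes hbase.quota.enabled=true in the servers' hbase-site.xml; otherwise the
        // master and region servers keep logging "Quota support disabled" as seen above.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Hypothetical table 'demo': cap it at 1000 requests per second.
          admin.setQuota(QuotaSettingsFactory.throttleTable(
              TableName.valueOf("demo"), ThrottleType.REQUEST_NUMBER, 1000, TimeUnit.SECONDS));
        }
      }
    }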
2024-11-16T12:47:06,062 INFO [RS:0;0450ab8807f5:41605 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C41605%2C1731761225412, suffix=, logDir=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/WALs/0450ab8807f5,41605,1731761225412, archiveDir=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/oldWALs, maxLogs=32 2024-11-16T12:47:06,067 INFO [RS:0;0450ab8807f5:41605 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C41605%2C1731761225412.1731761226066 2024-11-16T12:47:06,077 INFO [RS:0;0450ab8807f5:41605 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/WALs/0450ab8807f5,41605,1731761225412/0450ab8807f5%2C41605%2C1731761225412.1731761226066 2024-11-16T12:47:06,083 DEBUG [RS:0;0450ab8807f5:41605 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36247:36247),(127.0.0.1/127.0.0.1:43139:43139)] 2024-11-16T12:47:06,310 DEBUG [0450ab8807f5:39837 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T12:47:06,310 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0450ab8807f5,41605,1731761225412 2024-11-16T12:47:06,314 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0450ab8807f5,41605,1731761225412, state=OPENING 2024-11-16T12:47:06,367 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T12:47:06,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:06,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:06,376 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T12:47:06,376 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:47:06,376 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:47:06,376 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0450ab8807f5,41605,1731761225412}] 2024-11-16T12:47:06,531 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T12:47:06,535 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44397, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T12:47:06,540 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T12:47:06,540 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:47:06,543 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C41605%2C1731761225412.meta, suffix=.meta, logDir=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/WALs/0450ab8807f5,41605,1731761225412, archiveDir=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/oldWALs, maxLogs=32 2024-11-16T12:47:06,546 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C41605%2C1731761225412.meta.1731761226546.meta 2024-11-16T12:47:06,553 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/WALs/0450ab8807f5,41605,1731761225412/0450ab8807f5%2C41605%2C1731761225412.meta.1731761226546.meta 2024-11-16T12:47:06,554 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36247:36247),(127.0.0.1/127.0.0.1:43139:43139)] 2024-11-16T12:47:06,555 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:47:06,556 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T12:47:06,556 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T12:47:06,556 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
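Once OpenRegionProcedure completes, clients resolve hbase:meta through the connection registry, as the "Start fetching meta region location from registry" lines further down show. A small client-side sketch of the same lookup, assuming a default client configuration pointing at this cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Resolves to the server hosting hbase:meta,,1.1588230740; in this run
          // that is the lone region server 0450ab8807f5,41605,1731761225412.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getRegionNameAsString()
                + " -> " + loc.getServerName());
          }
        }
      }
    }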
2024-11-16T12:47:06,556 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T12:47:06,556 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:47:06,556 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T12:47:06,556 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T12:47:06,562 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T12:47:06,564 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T12:47:06,564 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:06,564 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:06,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T12:47:06,566 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T12:47:06,566 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:06,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:06,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T12:47:06,568 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T12:47:06,568 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:06,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:06,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T12:47:06,570 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T12:47:06,570 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:06,570 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-16T12:47:06,571 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T12:47:06,571 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/data/hbase/meta/1588230740 2024-11-16T12:47:06,573 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/data/hbase/meta/1588230740 2024-11-16T12:47:06,574 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T12:47:06,574 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T12:47:06,575 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T12:47:06,577 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T12:47:06,579 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805587, jitterRate=0.02435709536075592}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T12:47:06,579 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T12:47:06,580 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731761226557Writing region info on filesystem at 1731761226557Initializing all the Stores at 1731761226558 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761226558Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761226562 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761226562Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761226562Cleaning up temporary data from old regions at 1731761226574 (+12 ms)Running coprocessor post-open hooks at 1731761226579 (+5 ms)Region opened successfully at 1731761226580 (+1 ms) 2024-11-16T12:47:06,581 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731761226530 2024-11-16T12:47:06,584 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T12:47:06,585 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T12:47:06,586 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0450ab8807f5,41605,1731761225412 2024-11-16T12:47:06,587 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0450ab8807f5,41605,1731761225412, state=OPEN 2024-11-16T12:47:06,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T12:47:06,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T12:47:06,671 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0450ab8807f5,41605,1731761225412 2024-11-16T12:47:06,671 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:47:06,671 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:47:06,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T12:47:06,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0450ab8807f5,41605,1731761225412 in 295 msec 2024-11-16T12:47:06,678 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T12:47:06,678 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 770 msec 2024-11-16T12:47:06,680 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:47:06,680 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T12:47:06,682 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T12:47:06,682 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0450ab8807f5,41605,1731761225412, seqNum=-1] 2024-11-16T12:47:06,682 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T12:47:06,684 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41173, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T12:47:06,695 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 843 msec 2024-11-16T12:47:06,695 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731761226695, completionTime=-1 2024-11-16T12:47:06,695 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T12:47:06,695 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-16T12:47:06,699 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-16T12:47:06,699 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731761286699 2024-11-16T12:47:06,699 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731761346699 2024-11-16T12:47:06,700 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 4 msec 2024-11-16T12:47:06,700 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,39837,1731761225248-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:06,700 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,39837,1731761225248-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:06,700 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,39837,1731761225248-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:06,700 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0450ab8807f5:39837, period=300000, unit=MILLISECONDS is enabled. 
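The region-open journal earlier in this run enumerates the hbase:meta column families ('info', 'ns', 'rep_barrier', 'table') with their blocksize, bloom filter, versions and ROW_INDEX_V1 encoding, and the FlushLargeStoresPolicy line notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. The following is only an illustrative sketch of declaring a comparable family and that property with the public descriptor builders; the table name "demo" and the 16 MB value are assumptions, not taken from this run.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo")) // hypothetical table
        // Property name taken from the FlushLargeStoresPolicy log message; 16 MB is an assumed value.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024))
        // Family settings mirror the logged 'info' family of hbase:meta.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8192)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build())
        .build();
  }
}
```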
2024-11-16T12:47:06,700 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:06,701 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:06,704 DEBUG [master/0450ab8807f5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T12:47:06,707 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.208sec 2024-11-16T12:47:06,707 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T12:47:06,707 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T12:47:06,707 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T12:47:06,707 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T12:47:06,707 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T12:47:06,707 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,39837,1731761225248-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T12:47:06,707 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,39837,1731761225248-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T12:47:06,710 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T12:47:06,710 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T12:47:06,711 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,39837,1731761225248-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
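The ChoreService lines above register ScheduledChore instances (ClusterStatusChore, BalancerChore, HbckChore, RollingUpgradeChore, ...) each with a period and time unit. A minimal sketch of a custom chore follows, assuming the ScheduledChore(String, Stoppable, int) constructor; the chore name and the 60-second period are hypothetical, not values from this log.

```java
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Illustrative chore: the ChoreService invokes chore() every `period` milliseconds
// until the passed Stoppable reports it is stopped.
public class HeartbeatChore extends ScheduledChore {
  public HeartbeatChore(Stoppable stopper) {
    super("HeartbeatChore", stopper, 60_000); // name and period are assumptions
  }

  @Override
  protected void chore() {
    // periodic work goes here
  }
}
```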
2024-11-16T12:47:06,753 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cfccaa6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:47:06,753 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0450ab8807f5,39837,-1 for getting cluster id 2024-11-16T12:47:06,754 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T12:47:06,756 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd8f990be-238f-46ef-b1a6-6a0d50302113' 2024-11-16T12:47:06,756 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T12:47:06,756 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d8f990be-238f-46ef-b1a6-6a0d50302113" 2024-11-16T12:47:06,757 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56abadfd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:47:06,757 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0450ab8807f5,39837,-1] 2024-11-16T12:47:06,757 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T12:47:06,758 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:06,759 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36026, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T12:47:06,760 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8c7f5d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:47:06,761 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T12:47:06,762 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0450ab8807f5,41605,1731761225412, seqNum=-1] 2024-11-16T12:47:06,763 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T12:47:06,765 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51818, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T12:47:06,767 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0450ab8807f5,39837,1731761225248 2024-11-16T12:47:06,768 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:06,770 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T12:47:06,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T12:47:06,771 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T12:47:06,771 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:47:06,771 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:06,771 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:06,771 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T12:47:06,771 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T12:47:06,771 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1676223287, stopped=false 2024-11-16T12:47:06,771 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0450ab8807f5,39837,1731761225248 2024-11-16T12:47:06,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T12:47:06,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T12:47:06,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:06,807 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T12:47:06,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:06,807 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
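The call stack above records the tear-down path of this test: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which in turn runs shutdownMiniHBaseCluster and cleanup/closeConnection. A hedged sketch of that tear-down hook is shown below; only the shutdownMiniCluster() call is taken from the trace, while the TEST_UTIL field name and the @After wiring are assumptions about how such a test class is typically arranged.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class LogRollingTearDownSketch {
  // Hypothetical field; the real test keeps its own utility instance.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Mirrors the logged call chain:
    // shutdownMiniCluster() -> shutdownMiniHBaseCluster() -> cleanup()/closeConnection().
    TEST_UTIL.shutdownMiniCluster();
  }
}
```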
2024-11-16T12:47:06,807 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:47:06,807 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:06,807 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:47:06,807 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:47:06,807 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0450ab8807f5,41605,1731761225412' ***** 2024-11-16T12:47:06,807 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T12:47:06,808 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T12:47:06,808 INFO [RS:0;0450ab8807f5:41605 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T12:47:06,808 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T12:47:06,808 INFO [RS:0;0450ab8807f5:41605 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T12:47:06,808 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.HRegionServer(959): stopping server 0450ab8807f5,41605,1731761225412 2024-11-16T12:47:06,808 INFO [RS:0;0450ab8807f5:41605 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T12:47:06,808 INFO [RS:0;0450ab8807f5:41605 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0450ab8807f5:41605. 2024-11-16T12:47:06,808 DEBUG [RS:0;0450ab8807f5:41605 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:47:06,808 DEBUG [RS:0;0450ab8807f5:41605 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:06,808 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-16T12:47:06,808 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T12:47:06,808 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T12:47:06,809 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T12:47:06,809 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-16T12:47:06,809 DEBUG [RS:0;0450ab8807f5:41605 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-16T12:47:06,809 DEBUG [RS:0;0450ab8807f5:41605 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T12:47:06,809 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T12:47:06,809 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T12:47:06,809 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T12:47:06,809 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T12:47:06,809 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T12:47:06,809 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-16T12:47:06,825 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/data/hbase/meta/1588230740/.tmp/ns/15e7a902705341a1a5660823bdd70e48 is 43, key is default/ns:d/1731761226685/Put/seqid=0 2024-11-16T12:47:06,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741835_1011 (size=5153) 2024-11-16T12:47:06,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43477 is added to blk_1073741835_1011 (size=5153) 2024-11-16T12:47:06,832 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/data/hbase/meta/1588230740/.tmp/ns/15e7a902705341a1a5660823bdd70e48 2024-11-16T12:47:06,839 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/data/hbase/meta/1588230740/.tmp/ns/15e7a902705341a1a5660823bdd70e48 as hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/data/hbase/meta/1588230740/ns/15e7a902705341a1a5660823bdd70e48 2024-11-16T12:47:06,846 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/data/hbase/meta/1588230740/ns/15e7a902705341a1a5660823bdd70e48, entries=2, sequenceid=6, filesize=5.0 K 2024-11-16T12:47:06,847 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 38ms, sequenceid=6, compaction requested=false 2024-11-16T12:47:06,847 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T12:47:06,852 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T12:47:06,853 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T12:47:06,853 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T12:47:06,853 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731761226809Running coprocessor pre-close hooks at 1731761226809Disabling compacts and flushes for region at 1731761226809Disabling writes for close at 1731761226809Obtaining lock to block concurrent updates at 1731761226809Preparing flush snapshotting stores in 1588230740 at 1731761226809Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731761226810 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731761226810Flushing 1588230740/ns: creating writer at 1731761226811 (+1 ms)Flushing 1588230740/ns: appending metadata at 1731761226825 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731761226825Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e47d7fb: reopening flushed file at 1731761226838 (+13 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 38ms, sequenceid=6, compaction requested=false at 1731761226847 (+9 ms)Writing region close event to WAL at 1731761226848 (+1 ms)Running coprocessor post-close hooks at 1731761226853 (+5 ms)Closed at 1731761226853 2024-11-16T12:47:06,853 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T12:47:06,925 INFO [regionserver/0450ab8807f5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T12:47:06,925 INFO [regionserver/0450ab8807f5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T12:47:06,950 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T12:47:06,950 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T12:47:06,951 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-16T12:47:07,009 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.HRegionServer(976): stopping server 0450ab8807f5,41605,1731761225412; all regions closed. 2024-11-16T12:47:07,010 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:07,010 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:07,011 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:07,011 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:07,012 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:07,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43477 is added to blk_1073741834_1010 (size=1152) 2024-11-16T12:47:07,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741834_1010 (size=1152) 2024-11-16T12:47:07,022 DEBUG [RS:0;0450ab8807f5:41605 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/oldWALs 2024-11-16T12:47:07,022 INFO [RS:0;0450ab8807f5:41605 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0450ab8807f5%2C41605%2C1731761225412.meta:.meta(num 1731761226546) 2024-11-16T12:47:07,023 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:07,023 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:07,023 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:07,023 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:07,024 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:07,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43477 is added to blk_1073741833_1009 (size=93) 2024-11-16T12:47:07,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741833_1009 (size=93) 2024-11-16T12:47:07,028 DEBUG [RS:0;0450ab8807f5:41605 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/oldWALs 2024-11-16T12:47:07,028 INFO [RS:0;0450ab8807f5:41605 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0450ab8807f5%2C41605%2C1731761225412:(num 1731761226066) 2024-11-16T12:47:07,028 DEBUG [RS:0;0450ab8807f5:41605 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:07,028 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T12:47:07,028 INFO [RS:0;0450ab8807f5:41605 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T12:47:07,028 INFO [RS:0;0450ab8807f5:41605 {}] hbase.ChoreService(370): Chore service for: regionserver/0450ab8807f5:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T12:47:07,028 INFO [RS:0;0450ab8807f5:41605 {}] hbase.HBaseServerBase(448): Shutdown executor service 
2024-11-16T12:47:07,029 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T12:47:07,029 INFO [RS:0;0450ab8807f5:41605 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41605 2024-11-16T12:47:07,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0450ab8807f5,41605,1731761225412 2024-11-16T12:47:07,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T12:47:07,084 INFO [RS:0;0450ab8807f5:41605 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T12:47:07,092 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0450ab8807f5,41605,1731761225412] 2024-11-16T12:47:07,100 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0450ab8807f5,41605,1731761225412 already deleted, retry=false 2024-11-16T12:47:07,100 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0450ab8807f5,41605,1731761225412 expired; onlineServers=0 2024-11-16T12:47:07,100 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0450ab8807f5,39837,1731761225248' ***** 2024-11-16T12:47:07,100 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T12:47:07,100 INFO [M:0;0450ab8807f5:39837 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T12:47:07,100 INFO [M:0;0450ab8807f5:39837 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T12:47:07,101 DEBUG [M:0;0450ab8807f5:39837 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T12:47:07,101 DEBUG [M:0;0450ab8807f5:39837 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T12:47:07,101 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T12:47:07,101 DEBUG [master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761225857 {}] cleaner.HFileCleaner(306): Exit Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761225857,5,FailOnTimeoutGroup] 2024-11-16T12:47:07,101 DEBUG [master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761225857 {}] cleaner.HFileCleaner(306): Exit Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761225857,5,FailOnTimeoutGroup] 2024-11-16T12:47:07,101 INFO [M:0;0450ab8807f5:39837 {}] hbase.ChoreService(370): Chore service for: master/0450ab8807f5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T12:47:07,101 INFO [M:0;0450ab8807f5:39837 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T12:47:07,102 DEBUG [M:0;0450ab8807f5:39837 {}] master.HMaster(1795): Stopping service threads 2024-11-16T12:47:07,102 INFO [M:0;0450ab8807f5:39837 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T12:47:07,102 INFO [M:0;0450ab8807f5:39837 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T12:47:07,102 INFO [M:0;0450ab8807f5:39837 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T12:47:07,102 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T12:47:07,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T12:47:07,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:07,109 DEBUG [M:0;0450ab8807f5:39837 {}] zookeeper.ZKUtil(347): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T12:47:07,109 WARN [M:0;0450ab8807f5:39837 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T12:47:07,110 INFO [M:0;0450ab8807f5:39837 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/.lastflushedseqids 2024-11-16T12:47:07,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43477 is added to blk_1073741836_1012 (size=99) 2024-11-16T12:47:07,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741836_1012 (size=99) 2024-11-16T12:47:07,118 INFO [M:0;0450ab8807f5:39837 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T12:47:07,119 INFO [M:0;0450ab8807f5:39837 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T12:47:07,119 DEBUG [M:0;0450ab8807f5:39837 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T12:47:07,119 INFO [M:0;0450ab8807f5:39837 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:07,119 DEBUG [M:0;0450ab8807f5:39837 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:07,119 DEBUG [M:0;0450ab8807f5:39837 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T12:47:07,119 DEBUG [M:0;0450ab8807f5:39837 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:07,119 INFO [M:0;0450ab8807f5:39837 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-16T12:47:07,138 DEBUG [M:0;0450ab8807f5:39837 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0b7300856b464ddb942ccdcf80548ee0 is 82, key is hbase:meta,,1/info:regioninfo/1731761226586/Put/seqid=0 2024-11-16T12:47:07,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43477 is added to blk_1073741837_1013 (size=5672) 2024-11-16T12:47:07,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741837_1013 (size=5672) 2024-11-16T12:47:07,145 INFO [M:0;0450ab8807f5:39837 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0b7300856b464ddb942ccdcf80548ee0 2024-11-16T12:47:07,167 DEBUG [M:0;0450ab8807f5:39837 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a46094090df64991a07021fa32607887 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731761226693/Put/seqid=0 2024-11-16T12:47:07,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43477 is added to blk_1073741838_1014 (size=5275) 2024-11-16T12:47:07,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741838_1014 (size=5275) 2024-11-16T12:47:07,174 INFO [M:0;0450ab8807f5:39837 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a46094090df64991a07021fa32607887 2024-11-16T12:47:07,192 INFO [RS:0;0450ab8807f5:41605 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T12:47:07,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:47:07,192 INFO [RS:0;0450ab8807f5:41605 {}] regionserver.HRegionServer(1031): Exiting; stopping=0450ab8807f5,41605,1731761225412; zookeeper connection closed. 
2024-11-16T12:47:07,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41605-0x10144f8d0d10001, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:47:07,192 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@594d302c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@594d302c 2024-11-16T12:47:07,193 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T12:47:07,196 DEBUG [M:0;0450ab8807f5:39837 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/54450193ba574c81863837b0c8ed7016 is 69, key is 0450ab8807f5,41605,1731761225412/rs:state/1731761225895/Put/seqid=0 2024-11-16T12:47:07,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741839_1015 (size=5156) 2024-11-16T12:47:07,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43477 is added to blk_1073741839_1015 (size=5156) 2024-11-16T12:47:07,202 INFO [M:0;0450ab8807f5:39837 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/54450193ba574c81863837b0c8ed7016 2024-11-16T12:47:07,227 DEBUG [M:0;0450ab8807f5:39837 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0ed345d3c81043de8afa19551bfa5c59 is 52, key is load_balancer_on/state:d/1731761226769/Put/seqid=0 2024-11-16T12:47:07,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741840_1016 (size=5056) 2024-11-16T12:47:07,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43477 is added to blk_1073741840_1016 (size=5056) 2024-11-16T12:47:07,233 INFO [M:0;0450ab8807f5:39837 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0ed345d3c81043de8afa19551bfa5c59 2024-11-16T12:47:07,240 DEBUG [M:0;0450ab8807f5:39837 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0b7300856b464ddb942ccdcf80548ee0 as hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0b7300856b464ddb942ccdcf80548ee0 2024-11-16T12:47:07,246 INFO [M:0;0450ab8807f5:39837 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0b7300856b464ddb942ccdcf80548ee0, entries=8, sequenceid=29, 
filesize=5.5 K 2024-11-16T12:47:07,248 DEBUG [M:0;0450ab8807f5:39837 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a46094090df64991a07021fa32607887 as hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a46094090df64991a07021fa32607887 2024-11-16T12:47:07,253 INFO [M:0;0450ab8807f5:39837 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a46094090df64991a07021fa32607887, entries=3, sequenceid=29, filesize=5.2 K 2024-11-16T12:47:07,255 DEBUG [M:0;0450ab8807f5:39837 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/54450193ba574c81863837b0c8ed7016 as hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/54450193ba574c81863837b0c8ed7016 2024-11-16T12:47:07,260 INFO [M:0;0450ab8807f5:39837 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/54450193ba574c81863837b0c8ed7016, entries=1, sequenceid=29, filesize=5.0 K 2024-11-16T12:47:07,262 DEBUG [M:0;0450ab8807f5:39837 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0ed345d3c81043de8afa19551bfa5c59 as hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0ed345d3c81043de8afa19551bfa5c59 2024-11-16T12:47:07,267 INFO [M:0;0450ab8807f5:39837 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32873/user/jenkins/test-data/541b165e-30b6-bbb7-3269-9460dea28db2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0ed345d3c81043de8afa19551bfa5c59, entries=1, sequenceid=29, filesize=4.9 K 2024-11-16T12:47:07,268 INFO [M:0;0450ab8807f5:39837 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=29, compaction requested=false 2024-11-16T12:47:07,270 INFO [M:0;0450ab8807f5:39837 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T12:47:07,270 DEBUG [M:0;0450ab8807f5:39837 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731761227119Disabling compacts and flushes for region at 1731761227119Disabling writes for close at 1731761227119Obtaining lock to block concurrent updates at 1731761227119Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731761227119Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731761227120 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731761227120Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731761227121 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731761227138 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731761227138Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731761227150 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731761227167 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731761227167Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731761227180 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731761227196 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731761227196Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731761227208 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731761227226 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731761227226Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36371ca3: reopening flushed file at 1731761227238 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9ed115a: reopening flushed file at 1731761227247 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@779324e: reopening flushed file at 1731761227254 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2bcf6783: reopening flushed file at 1731761227261 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=29, compaction requested=false at 1731761227269 (+8 ms)Writing region close event to WAL at 1731761227270 (+1 ms)Closed at 1731761227270 2024-11-16T12:47:07,271 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:07,271 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:07,271 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:07,271 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:07,271 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:07,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43477 is added to blk_1073741830_1006 (size=10311) 2024-11-16T12:47:07,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741830_1006 (size=10311) 2024-11-16T12:47:07,274 INFO [M:0;0450ab8807f5:39837 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-16T12:47:07,274 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T12:47:07,274 INFO [M:0;0450ab8807f5:39837 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39837 2024-11-16T12:47:07,274 INFO [M:0;0450ab8807f5:39837 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T12:47:07,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:47:07,401 INFO [M:0;0450ab8807f5:39837 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T12:47:07,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39837-0x10144f8d0d10000, quorum=127.0.0.1:55508, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:47:07,404 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2fa3cb60{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:07,405 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@18662151{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:47:07,405 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:47:07,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2066f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:47:07,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ff5ef6c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/hadoop.log.dir/,STOPPED} 2024-11-16T12:47:07,406 WARN [BP-45895880-172.17.0.2-1731761223400 heartbeating to localhost/127.0.0.1:32873 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:47:07,406 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T12:47:07,407 WARN [BP-45895880-172.17.0.2-1731761223400 heartbeating to localhost/127.0.0.1:32873 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-45895880-172.17.0.2-1731761223400 (Datanode Uuid bc9fe41a-6701-4714-95b0-aeb1aa0b868b) service to localhost/127.0.0.1:32873 2024-11-16T12:47:07,407 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:47:07,407 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/cluster_a8c24dd2-409a-e7eb-c8dd-a3e580eeb88f/data/data3/current/BP-45895880-172.17.0.2-1731761223400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:07,407 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/cluster_a8c24dd2-409a-e7eb-c8dd-a3e580eeb88f/data/data4/current/BP-45895880-172.17.0.2-1731761223400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:07,408 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:47:07,410 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@101cd95b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:07,411 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@260e0b79{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:47:07,411 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:47:07,411 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f2378c9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:47:07,411 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d119060{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/hadoop.log.dir/,STOPPED} 2024-11-16T12:47:07,412 WARN [BP-45895880-172.17.0.2-1731761223400 heartbeating to localhost/127.0.0.1:32873 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:47:07,412 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T12:47:07,412 WARN [BP-45895880-172.17.0.2-1731761223400 heartbeating to localhost/127.0.0.1:32873 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-45895880-172.17.0.2-1731761223400 (Datanode Uuid db346079-5df1-40f8-a291-e2008441fe1b) service to localhost/127.0.0.1:32873 2024-11-16T12:47:07,412 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:47:07,413 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/cluster_a8c24dd2-409a-e7eb-c8dd-a3e580eeb88f/data/data1/current/BP-45895880-172.17.0.2-1731761223400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:07,413 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/cluster_a8c24dd2-409a-e7eb-c8dd-a3e580eeb88f/data/data2/current/BP-45895880-172.17.0.2-1731761223400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:07,413 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:47:07,419 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@194f043a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T12:47:07,420 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4d974b8f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:47:07,420 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:47:07,420 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78fa6004{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:47:07,421 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15ba4d19{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/hadoop.log.dir/,STOPPED} 2024-11-16T12:47:07,427 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T12:47:07,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T12:47:07,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T12:47:07,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/hadoop.log.dir so I do NOT create it in target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6 2024-11-16T12:47:07,445 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2b8f2a9f-4597-66f8-137a-f9a3ba8c2be4/hadoop.tmp.dir so I do NOT create it in target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6 2024-11-16T12:47:07,445 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd, deleteOnExit=true 2024-11-16T12:47:07,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T12:47:07,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/test.cache.data in system properties and HBase conf 2024-11-16T12:47:07,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T12:47:07,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.log.dir in system properties and HBase conf 2024-11-16T12:47:07,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T12:47:07,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T12:47:07,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T12:47:07,446 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T12:47:07,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T12:47:07,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T12:47:07,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T12:47:07,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T12:47:07,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T12:47:07,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T12:47:07,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T12:47:07,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T12:47:07,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T12:47:07,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/nfs.dump.dir in system properties and HBase conf 2024-11-16T12:47:07,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/java.io.tmpdir in system properties and HBase conf 2024-11-16T12:47:07,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T12:47:07,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T12:47:07,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T12:47:07,458 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T12:47:07,585 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:07,593 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:07,691 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-16T12:47:07,693 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:07,710 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:07,712 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:07,713 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:07,742 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:47:07,748 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:47:07,749 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:47:07,749 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:47:07,750 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T12:47:07,750 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:47:07,751 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a9b2a9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:47:07,751 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@229a8eec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:47:07,848 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4549eece{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/java.io.tmpdir/jetty-localhost-37935-hadoop-hdfs-3_4_1-tests_jar-_-any-12545381331914224369/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T12:47:07,848 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21d5e4af{HTTP/1.1, (http/1.1)}{localhost:37935} 2024-11-16T12:47:07,848 INFO [Time-limited test {}] server.Server(415): Started @106735ms 2024-11-16T12:47:07,863 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T12:47:07,925 INFO [regionserver/0450ab8807f5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T12:47:08,080 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:47:08,085 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:47:08,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:47:08,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:47:08,087 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T12:47:08,087 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@131ba134{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:47:08,088 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35026af9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:47:08,195 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@461a42e8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/java.io.tmpdir/jetty-localhost-43565-hadoop-hdfs-3_4_1-tests_jar-_-any-13081549588862544169/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:08,195 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5289966b{HTTP/1.1, (http/1.1)}{localhost:43565} 2024-11-16T12:47:08,195 INFO [Time-limited test {}] server.Server(415): Started @107082ms 2024-11-16T12:47:08,197 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:47:08,229 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:47:08,234 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:47:08,234 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:47:08,235 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:47:08,235 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T12:47:08,235 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@175f7b69{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:47:08,236 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f988831{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:47:08,332 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@781791f2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/java.io.tmpdir/jetty-localhost-36181-hadoop-hdfs-3_4_1-tests_jar-_-any-867370272403225510/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:08,333 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2273073a{HTTP/1.1, (http/1.1)}{localhost:36181} 2024-11-16T12:47:08,333 INFO [Time-limited test {}] server.Server(415): Started @107220ms 2024-11-16T12:47:08,334 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:47:08,874 WARN [Thread-672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data1/current/BP-886867469-172.17.0.2-1731761227469/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:08,874 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data2/current/BP-886867469-172.17.0.2-1731761227469/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:08,889 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T12:47:08,892 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd8c587b5783ea83d with lease ID 0x92d8eda596404388: Processing first storage report for DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df from datanode DatanodeRegistration(127.0.0.1:34023, datanodeUuid=92e12824-0524-4115-9c7f-83686a2d39fd, infoPort=38523, infoSecurePort=0, ipcPort=40715, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469) 2024-11-16T12:47:08,892 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd8c587b5783ea83d with lease ID 0x92d8eda596404388: from storage DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df node DatanodeRegistration(127.0.0.1:34023, datanodeUuid=92e12824-0524-4115-9c7f-83686a2d39fd, infoPort=38523, infoSecurePort=0, ipcPort=40715, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T12:47:08,892 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd8c587b5783ea83d with lease ID 0x92d8eda596404388: Processing first storage report for DS-35df04ab-9238-45c5-8fa1-118635288851 from datanode DatanodeRegistration(127.0.0.1:34023, datanodeUuid=92e12824-0524-4115-9c7f-83686a2d39fd, infoPort=38523, infoSecurePort=0, ipcPort=40715, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469) 2024-11-16T12:47:08,893 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd8c587b5783ea83d with lease ID 0x92d8eda596404388: from storage DS-35df04ab-9238-45c5-8fa1-118635288851 node DatanodeRegistration(127.0.0.1:34023, datanodeUuid=92e12824-0524-4115-9c7f-83686a2d39fd, infoPort=38523, infoSecurePort=0, ipcPort=40715, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:09,038 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data3/current/BP-886867469-172.17.0.2-1731761227469/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:09,039 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data4/current/BP-886867469-172.17.0.2-1731761227469/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:09,059 WARN [Thread-659 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T12:47:09,061 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd8b174719a16425a with lease ID 0x92d8eda596404389: Processing first storage report for DS-8258fc71-cd75-42d7-8209-2e094c48164b from datanode DatanodeRegistration(127.0.0.1:41753, datanodeUuid=fc9bb905-c3c0-4f61-a035-12b1008d0723, infoPort=33799, infoSecurePort=0, ipcPort=36053, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469) 2024-11-16T12:47:09,061 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd8b174719a16425a with lease ID 0x92d8eda596404389: from storage DS-8258fc71-cd75-42d7-8209-2e094c48164b node DatanodeRegistration(127.0.0.1:41753, datanodeUuid=fc9bb905-c3c0-4f61-a035-12b1008d0723, infoPort=33799, infoSecurePort=0, ipcPort=36053, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:09,062 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd8b174719a16425a with lease ID 0x92d8eda596404389: Processing first storage report for DS-3c906e86-911b-4015-ae0f-eeb64ca7d21e from datanode DatanodeRegistration(127.0.0.1:41753, datanodeUuid=fc9bb905-c3c0-4f61-a035-12b1008d0723, infoPort=33799, infoSecurePort=0, ipcPort=36053, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469) 2024-11-16T12:47:09,062 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd8b174719a16425a with lease ID 0x92d8eda596404389: from storage DS-3c906e86-911b-4015-ae0f-eeb64ca7d21e node DatanodeRegistration(127.0.0.1:41753, datanodeUuid=fc9bb905-c3c0-4f61-a035-12b1008d0723, infoPort=33799, infoSecurePort=0, ipcPort=36053, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:09,065 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6 2024-11-16T12:47:09,069 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/zookeeper_0, clientPort=62408, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T12:47:09,069 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62408 2024-11-16T12:47:09,070 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:09,071 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:09,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41753 is added to blk_1073741825_1001 (size=7) 2024-11-16T12:47:09,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34023 is added to blk_1073741825_1001 (size=7) 2024-11-16T12:47:09,082 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628 with version=8 2024-11-16T12:47:09,082 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/hbase-staging 2024-11-16T12:47:09,084 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0450ab8807f5:0 server-side Connection retries=45 2024-11-16T12:47:09,084 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:09,084 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:09,084 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T12:47:09,084 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:09,084 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T12:47:09,084 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T12:47:09,084 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T12:47:09,085 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35227 2024-11-16T12:47:09,086 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35227 connecting to ZooKeeper ensemble=127.0.0.1:62408 2024-11-16T12:47:09,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:352270x0, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T12:47:09,141 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35227-0x10144f8dfd10000 connected 2024-11-16T12:47:09,217 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:09,219 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:09,222 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:47:09,222 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628, hbase.cluster.distributed=false 2024-11-16T12:47:09,224 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T12:47:09,225 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35227 2024-11-16T12:47:09,225 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35227 2024-11-16T12:47:09,225 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35227 2024-11-16T12:47:09,225 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35227 2024-11-16T12:47:09,226 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35227 2024-11-16T12:47:09,240 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0450ab8807f5:0 server-side Connection retries=45 2024-11-16T12:47:09,240 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:09,241 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:09,241 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T12:47:09,241 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:09,241 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T12:47:09,241 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T12:47:09,241 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T12:47:09,242 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33749 2024-11-16T12:47:09,243 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33749 connecting to ZooKeeper ensemble=127.0.0.1:62408 2024-11-16T12:47:09,244 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:09,245 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:09,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:337490x0, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T12:47:09,259 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:337490x0, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:47:09,259 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33749-0x10144f8dfd10001 connected 2024-11-16T12:47:09,259 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T12:47:09,259 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T12:47:09,260 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T12:47:09,261 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T12:47:09,262 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33749 2024-11-16T12:47:09,262 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33749 2024-11-16T12:47:09,262 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33749 2024-11-16T12:47:09,262 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33749 2024-11-16T12:47:09,264 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33749 2024-11-16T12:47:09,276 DEBUG [M:0;0450ab8807f5:35227 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0450ab8807f5:35227 2024-11-16T12:47:09,276 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0450ab8807f5,35227,1731761229084 2024-11-16T12:47:09,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:47:09,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:47:09,283 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/0450ab8807f5,35227,1731761229084 2024-11-16T12:47:09,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T12:47:09,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:09,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:09,292 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T12:47:09,292 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0450ab8807f5,35227,1731761229084 from backup master directory 2024-11-16T12:47:09,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:47:09,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0450ab8807f5,35227,1731761229084 2024-11-16T12:47:09,300 WARN [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T12:47:09,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:47:09,300 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0450ab8807f5,35227,1731761229084 2024-11-16T12:47:09,305 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/hbase.id] with ID: 6f84f2fd-d9b2-41cc-988c-9e76f5b3639a 2024-11-16T12:47:09,305 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/.tmp/hbase.id 2024-11-16T12:47:09,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34023 is added to blk_1073741826_1002 (size=42) 2024-11-16T12:47:09,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41753 is added to blk_1073741826_1002 (size=42) 2024-11-16T12:47:09,313 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/.tmp/hbase.id]:[hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/hbase.id] 2024-11-16T12:47:09,328 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:09,328 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T12:47:09,330 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-16T12:47:09,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:09,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:09,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41753 is added to blk_1073741827_1003 (size=196) 2024-11-16T12:47:09,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34023 is added to blk_1073741827_1003 (size=196) 2024-11-16T12:47:09,353 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T12:47:09,354 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T12:47:09,354 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:47:09,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34023 is added to blk_1073741828_1004 (size=1189) 2024-11-16T12:47:09,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41753 is added to blk_1073741828_1004 (size=1189) 2024-11-16T12:47:09,364 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store 2024-11-16T12:47:09,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34023 is added to blk_1073741829_1005 (size=34) 2024-11-16T12:47:09,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41753 is added to blk_1073741829_1005 (size=34) 2024-11-16T12:47:09,372 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:47:09,372 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T12:47:09,372 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:09,372 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:09,372 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T12:47:09,372 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:09,373 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T12:47:09,373 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731761229372Disabling compacts and flushes for region at 1731761229372Disabling writes for close at 1731761229372Writing region close event to WAL at 1731761229373 (+1 ms)Closed at 1731761229373 2024-11-16T12:47:09,373 WARN [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/.initializing 2024-11-16T12:47:09,373 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/WALs/0450ab8807f5,35227,1731761229084 2024-11-16T12:47:09,377 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C35227%2C1731761229084, suffix=, logDir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/WALs/0450ab8807f5,35227,1731761229084, archiveDir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/oldWALs, maxLogs=10 2024-11-16T12:47:09,377 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C35227%2C1731761229084.1731761229377 2024-11-16T12:47:09,383 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/WALs/0450ab8807f5,35227,1731761229084/0450ab8807f5%2C35227%2C1731761229084.1731761229377 2024-11-16T12:47:09,389 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33799:33799),(127.0.0.1/127.0.0.1:38523:38523)] 2024-11-16T12:47:09,392 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:47:09,392 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:47:09,392 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:09,392 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:09,394 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:09,395 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T12:47:09,396 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:09,396 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:09,396 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:09,398 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T12:47:09,398 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:09,398 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:47:09,399 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:09,400 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T12:47:09,400 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:09,401 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:47:09,401 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:09,402 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T12:47:09,402 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:09,403 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:47:09,403 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:09,404 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:09,404 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:09,406 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:09,406 DEBUG [master/0450ab8807f5:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:09,407 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T12:47:09,408 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:09,411 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:47:09,412 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=707439, jitterRate=-0.10044525563716888}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T12:47:09,413 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731761229392Initializing all the Stores at 1731761229393 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761229393Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761229394 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761229394Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761229394Cleaning up temporary data from old regions at 1731761229406 (+12 ms)Region opened successfully at 1731761229413 (+7 ms) 2024-11-16T12:47:09,413 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T12:47:09,417 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@500f1721, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0450ab8807f5/172.17.0.2:0 2024-11-16T12:47:09,418 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T12:47:09,418 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T12:47:09,419 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T12:47:09,419 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T12:47:09,419 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T12:47:09,420 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T12:47:09,420 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T12:47:09,423 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T12:47:09,424 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T12:47:09,458 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T12:47:09,458 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T12:47:09,459 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T12:47:09,466 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T12:47:09,467 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T12:47:09,468 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T12:47:09,474 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T12:47:09,476 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T12:47:09,483 DEBUG 
[master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T12:47:09,486 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T12:47:09,491 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T12:47:09,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T12:47:09,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:09,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T12:47:09,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:09,500 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0450ab8807f5,35227,1731761229084, sessionid=0x10144f8dfd10000, setting cluster-up flag (Was=false) 2024-11-16T12:47:09,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:09,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:09,541 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T12:47:09,544 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0450ab8807f5,35227,1731761229084 2024-11-16T12:47:09,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:09,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:09,591 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T12:47:09,593 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0450ab8807f5,35227,1731761229084 2024-11-16T12:47:09,595 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T12:47:09,597 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T12:47:09,597 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T12:47:09,598 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T12:47:09,598 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0450ab8807f5,35227,1731761229084 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T12:47:09,600 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:47:09,600 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:47:09,600 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:47:09,600 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:47:09,601 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0450ab8807f5:0, corePoolSize=10, maxPoolSize=10 2024-11-16T12:47:09,601 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:09,601 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0450ab8807f5:0, corePoolSize=2, maxPoolSize=2 2024-11-16T12:47:09,601 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0450ab8807f5:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T12:47:09,602 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731761259602 2024-11-16T12:47:09,602 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T12:47:09,602 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T12:47:09,602 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T12:47:09,602 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T12:47:09,602 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T12:47:09,602 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T12:47:09,602 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:09,603 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:47:09,603 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T12:47:09,605 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:09,605 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T12:47:09,607 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T12:47:09,607 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T12:47:09,607 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T12:47:09,610 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T12:47:09,611 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T12:47:09,611 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761229611,5,FailOnTimeoutGroup] 2024-11-16T12:47:09,611 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761229611,5,FailOnTimeoutGroup] 2024-11-16T12:47:09,611 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:09,611 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T12:47:09,612 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:09,612 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
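
The hbase:meta table descriptor written out above gives every column family ROW_INDEX_V1 block encoding, a ROWCOL bloom filter, in-memory caching and (for most families) an 8 KB block size. For reference, a minimal sketch of assembling a comparable descriptor through HBase's public client API; this is illustrative only, the class name and the "example:meta_like" table name are made up here, and it is not the internal FSTableDescriptors code that produced the log line:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
        public static void main(String[] args) {
            // Mirrors the 'info' family settings printed in the log:
            // ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks, 3 versions.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setMaxVersions(3)
                .build();

            TableDescriptor table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example", "meta_like"))
                .setColumnFamily(info)
                .build();

            System.out.println(table);
        }
    }
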
2024-11-16T12:47:09,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41753 is added to blk_1073741831_1007 (size=1321) 2024-11-16T12:47:09,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34023 is added to blk_1073741831_1007 (size=1321) 2024-11-16T12:47:09,618 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T12:47:09,618 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628 2024-11-16T12:47:09,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34023 is added to blk_1073741832_1008 (size=32) 2024-11-16T12:47:09,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41753 is added to blk_1073741832_1008 (size=32) 2024-11-16T12:47:09,629 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:47:09,630 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T12:47:09,632 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T12:47:09,632 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:09,633 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:09,633 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T12:47:09,635 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T12:47:09,635 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:09,636 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:09,636 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T12:47:09,638 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T12:47:09,638 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:09,638 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:09,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T12:47:09,640 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T12:47:09,641 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:09,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:09,641 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T12:47:09,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740 2024-11-16T12:47:09,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740 2024-11-16T12:47:09,644 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T12:47:09,644 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T12:47:09,645 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
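
Each store opener above prints the same CompactionConfiguration: minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, major period 604800000 ms (7 days) with jitter 0.5. A minimal sketch of the standard configuration keys behind those numbers, with the values simply copied from the log output; not taken from this test's hbase-site.xml:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();

            // Values mirror the CompactionConfiguration line logged per store.
            conf.setInt("hbase.hstore.compaction.min", 3);       // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);      // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
            conf.setLong("hbase.hregion.majorcompaction", 604800000L);    // major period, ms
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);  // major jitter

            System.out.println("min files    = " + conf.getInt("hbase.hstore.compaction.min", -1));
            System.out.println("major period = " + conf.getLong("hbase.hregion.majorcompaction", -1L));
        }
    }
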
2024-11-16T12:47:09,646 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T12:47:09,648 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:47:09,649 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=864505, jitterRate=0.09927575290203094}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T12:47:09,650 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731761229629Initializing all the Stores at 1731761229630 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761229630Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761229630Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761229630Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761229630Cleaning up temporary data from old regions at 1731761229644 (+14 ms)Region opened successfully at 1731761229650 (+6 ms) 2024-11-16T12:47:09,650 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T12:47:09,650 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T12:47:09,650 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T12:47:09,651 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T12:47:09,651 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T12:47:09,651 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T12:47:09,651 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731761229650Disabling compacts and flushes for region at 1731761229650Disabling writes for close at 1731761229651 (+1 ms)Writing 
region close event to WAL at 1731761229651Closed at 1731761229651 2024-11-16T12:47:09,653 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:47:09,653 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T12:47:09,653 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T12:47:09,655 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T12:47:09,656 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T12:47:09,667 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(746): ClusterId : 6f84f2fd-d9b2-41cc-988c-9e76f5b3639a 2024-11-16T12:47:09,667 DEBUG [RS:0;0450ab8807f5:33749 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T12:47:09,684 DEBUG [RS:0;0450ab8807f5:33749 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T12:47:09,684 DEBUG [RS:0;0450ab8807f5:33749 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T12:47:09,692 DEBUG [RS:0;0450ab8807f5:33749 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T12:47:09,693 DEBUG [RS:0;0450ab8807f5:33749 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48d15746, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0450ab8807f5/172.17.0.2:0 2024-11-16T12:47:09,704 DEBUG [RS:0;0450ab8807f5:33749 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0450ab8807f5:33749 2024-11-16T12:47:09,704 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T12:47:09,704 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T12:47:09,704 DEBUG [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T12:47:09,705 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(2659): reportForDuty to master=0450ab8807f5,35227,1731761229084 with port=33749, startcode=1731761229240 2024-11-16T12:47:09,706 DEBUG [RS:0;0450ab8807f5:33749 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T12:47:09,708 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33081, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T12:47:09,708 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35227 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0450ab8807f5,33749,1731761229240 2024-11-16T12:47:09,708 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35227 {}] master.ServerManager(517): Registering regionserver=0450ab8807f5,33749,1731761229240 2024-11-16T12:47:09,710 DEBUG [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628 2024-11-16T12:47:09,710 DEBUG [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39393 2024-11-16T12:47:09,711 DEBUG [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T12:47:09,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T12:47:09,717 DEBUG [RS:0;0450ab8807f5:33749 {}] zookeeper.ZKUtil(111): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0450ab8807f5,33749,1731761229240 2024-11-16T12:47:09,717 WARN [RS:0;0450ab8807f5:33749 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T12:47:09,717 INFO [RS:0;0450ab8807f5:33749 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:47:09,717 DEBUG [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240 2024-11-16T12:47:09,717 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0450ab8807f5,33749,1731761229240] 2024-11-16T12:47:09,721 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T12:47:09,723 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T12:47:09,724 INFO [RS:0;0450ab8807f5:33749 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T12:47:09,724 INFO [RS:0;0450ab8807f5:33749 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
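
The region server above reports globalMemStoreLimit=880 M (low mark 836 M) and compaction throughput bounds of 100/50 MB per second. A minimal sketch, assuming the usual configuration keys for these limits (memstore limits are fractions of the RS heap, so the absolute values in the log depend on the test JVM's heap size); key names should be checked against the HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerMemoryAndThroughputSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();

            // Global memstore limit as a fraction of the region server heap;
            // 0.4 of a ~2.2 GB heap is roughly the 880 M printed above.
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            // Low-water mark as a fraction of that limit (836 M ~= 0.95 * 880 M).
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);

            // Pressure-aware compaction throughput bounds in bytes/second,
            // matching "higher bound: 100.00 MB/second, lower bound 50.00 MB/second".
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);

            System.out.println("memstore fraction = "
                + conf.getFloat("hbase.regionserver.global.memstore.size", -1f));
        }
    }
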
2024-11-16T12:47:09,724 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T12:47:09,725 INFO [RS:0;0450ab8807f5:33749 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T12:47:09,725 INFO [RS:0;0450ab8807f5:33749 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:09,725 DEBUG [RS:0;0450ab8807f5:33749 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:09,725 DEBUG [RS:0;0450ab8807f5:33749 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:09,725 DEBUG [RS:0;0450ab8807f5:33749 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:09,725 DEBUG [RS:0;0450ab8807f5:33749 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:09,725 DEBUG [RS:0;0450ab8807f5:33749 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:09,725 DEBUG [RS:0;0450ab8807f5:33749 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0450ab8807f5:0, corePoolSize=2, maxPoolSize=2 2024-11-16T12:47:09,725 DEBUG [RS:0;0450ab8807f5:33749 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:09,726 DEBUG [RS:0;0450ab8807f5:33749 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:09,726 DEBUG [RS:0;0450ab8807f5:33749 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:09,726 DEBUG [RS:0;0450ab8807f5:33749 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:09,726 DEBUG [RS:0;0450ab8807f5:33749 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:09,726 DEBUG [RS:0;0450ab8807f5:33749 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:09,726 DEBUG [RS:0;0450ab8807f5:33749 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0450ab8807f5:0, corePoolSize=3, maxPoolSize=3 2024-11-16T12:47:09,726 DEBUG [RS:0;0450ab8807f5:33749 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0, corePoolSize=3, maxPoolSize=3 2024-11-16T12:47:09,726 INFO [RS:0;0450ab8807f5:33749 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-16T12:47:09,727 INFO [RS:0;0450ab8807f5:33749 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:09,727 INFO [RS:0;0450ab8807f5:33749 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:09,727 INFO [RS:0;0450ab8807f5:33749 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:09,727 INFO [RS:0;0450ab8807f5:33749 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:09,727 INFO [RS:0;0450ab8807f5:33749 {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,33749,1731761229240-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T12:47:09,744 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T12:47:09,744 INFO [RS:0;0450ab8807f5:33749 {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,33749,1731761229240-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:09,744 INFO [RS:0;0450ab8807f5:33749 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:09,744 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.Replication(171): 0450ab8807f5,33749,1731761229240 started 2024-11-16T12:47:09,758 INFO [RS:0;0450ab8807f5:33749 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:09,758 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(1482): Serving as 0450ab8807f5,33749,1731761229240, RpcServer on 0450ab8807f5/172.17.0.2:33749, sessionid=0x10144f8dfd10001 2024-11-16T12:47:09,758 DEBUG [RS:0;0450ab8807f5:33749 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T12:47:09,758 DEBUG [RS:0;0450ab8807f5:33749 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0450ab8807f5,33749,1731761229240 2024-11-16T12:47:09,758 DEBUG [RS:0;0450ab8807f5:33749 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0450ab8807f5,33749,1731761229240' 2024-11-16T12:47:09,758 DEBUG [RS:0;0450ab8807f5:33749 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T12:47:09,759 DEBUG [RS:0;0450ab8807f5:33749 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T12:47:09,760 DEBUG [RS:0;0450ab8807f5:33749 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T12:47:09,760 DEBUG [RS:0;0450ab8807f5:33749 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T12:47:09,760 DEBUG [RS:0;0450ab8807f5:33749 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0450ab8807f5,33749,1731761229240 2024-11-16T12:47:09,760 DEBUG [RS:0;0450ab8807f5:33749 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0450ab8807f5,33749,1731761229240' 2024-11-16T12:47:09,760 DEBUG [RS:0;0450ab8807f5:33749 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T12:47:09,760 DEBUG 
[RS:0;0450ab8807f5:33749 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T12:47:09,761 DEBUG [RS:0;0450ab8807f5:33749 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T12:47:09,761 INFO [RS:0;0450ab8807f5:33749 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T12:47:09,761 INFO [RS:0;0450ab8807f5:33749 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T12:47:09,806 WARN [0450ab8807f5:35227 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-16T12:47:09,865 INFO [RS:0;0450ab8807f5:33749 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C33749%2C1731761229240, suffix=, logDir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240, archiveDir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/oldWALs, maxLogs=32 2024-11-16T12:47:09,866 INFO [RS:0;0450ab8807f5:33749 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33749%2C1731761229240.1731761229866 2024-11-16T12:47:09,872 INFO [RS:0;0450ab8807f5:33749 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761229866 2024-11-16T12:47:09,873 DEBUG [RS:0;0450ab8807f5:33749 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33799:33799),(127.0.0.1/127.0.0.1:38523:38523)] 2024-11-16T12:47:10,057 DEBUG [0450ab8807f5:35227 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T12:47:10,058 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0450ab8807f5,33749,1731761229240 2024-11-16T12:47:10,062 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0450ab8807f5,33749,1731761229240, state=OPENING 2024-11-16T12:47:10,090 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T12:47:10,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:10,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:10,100 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T12:47:10,100 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:47:10,100 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=0450ab8807f5,33749,1731761229240}] 2024-11-16T12:47:10,101 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:47:10,254 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T12:47:10,257 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49377, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T12:47:10,263 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T12:47:10,263 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:47:10,266 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C33749%2C1731761229240.meta, suffix=.meta, logDir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240, archiveDir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/oldWALs, maxLogs=32 2024-11-16T12:47:10,290 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta 2024-11-16T12:47:10,295 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta 2024-11-16T12:47:10,296 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33799:33799),(127.0.0.1/127.0.0.1:38523:38523)] 2024-11-16T12:47:10,297 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:47:10,297 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T12:47:10,297 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T12:47:10,297 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
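
Both WALs created above (the region server WAL and the meta WAL) log the same FSHLog settings: blocksize=256 MB, rollsize=128 MB and maxLogs=32. The roll size is derived from the block size via a multiplier, so 256 MB x 0.5 gives the 128 MB shown. A minimal sketch of the corresponding configuration keys, illustrative only and not this test's actual configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();

            // "filesystem" selects FSHLogProvider, the WAL provider named in the log.
            conf.set("hbase.wal.provider", "filesystem");
            // WAL block size; roll size = block size * roll multiplier (256 MB * 0.5 = 128 MB).
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            // Cap on the number of WAL files before flushes are forced (maxLogs=32).
            conf.setInt("hbase.regionserver.maxlogs", 32);

            System.out.println("maxLogs = " + conf.getInt("hbase.regionserver.maxlogs", -1));
        }
    }
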
2024-11-16T12:47:10,297 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T12:47:10,297 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:47:10,297 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T12:47:10,297 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T12:47:10,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T12:47:10,300 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T12:47:10,300 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:10,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:10,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T12:47:10,301 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T12:47:10,301 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:10,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:10,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T12:47:10,303 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T12:47:10,303 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:10,303 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:10,303 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T12:47:10,304 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T12:47:10,304 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:10,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-16T12:47:10,305 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T12:47:10,305 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740 2024-11-16T12:47:10,306 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740 2024-11-16T12:47:10,309 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T12:47:10,309 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T12:47:10,310 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T12:47:10,312 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T12:47:10,313 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=859978, jitterRate=0.09351935982704163}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T12:47:10,313 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T12:47:10,314 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731761230298Writing region info on filesystem at 1731761230298Initializing all the Stores at 1731761230298Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761230299 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761230299Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761230299Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761230299Cleaning up temporary data from old regions at 1731761230309 (+10 ms)Running coprocessor post-open hooks at 1731761230313 (+4 ms)Region opened successfully at 1731761230314 (+1 ms) 2024-11-16T12:47:10,316 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731761230254 2024-11-16T12:47:10,319 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T12:47:10,319 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T12:47:10,320 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0450ab8807f5,33749,1731761229240 2024-11-16T12:47:10,321 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0450ab8807f5,33749,1731761229240, state=OPEN 2024-11-16T12:47:10,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T12:47:10,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T12:47:10,369 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0450ab8807f5,33749,1731761229240 2024-11-16T12:47:10,369 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:47:10,369 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:47:10,372 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T12:47:10,372 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0450ab8807f5,33749,1731761229240 in 269 msec 2024-11-16T12:47:10,374 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T12:47:10,374 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 719 msec 2024-11-16T12:47:10,375 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:47:10,375 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T12:47:10,377 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T12:47:10,377 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0450ab8807f5,33749,1731761229240, seqNum=-1] 2024-11-16T12:47:10,377 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T12:47:10,378 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43459, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T12:47:10,385 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 787 msec 2024-11-16T12:47:10,385 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731761230385, completionTime=-1 2024-11-16T12:47:10,385 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T12:47:10,385 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-16T12:47:10,387 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-16T12:47:10,387 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731761290387 2024-11-16T12:47:10,387 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731761350387 2024-11-16T12:47:10,387 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-16T12:47:10,387 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,35227,1731761229084-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:10,387 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,35227,1731761229084-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:10,387 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,35227,1731761229084-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:10,387 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0450ab8807f5:35227, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T12:47:10,387 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:10,388 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:10,390 DEBUG [master/0450ab8807f5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T12:47:10,395 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.094sec 2024-11-16T12:47:10,395 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T12:47:10,395 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T12:47:10,395 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T12:47:10,395 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T12:47:10,395 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T12:47:10,395 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,35227,1731761229084-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T12:47:10,395 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,35227,1731761229084-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T12:47:10,398 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T12:47:10,398 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T12:47:10,398 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,35227,1731761229084-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T12:47:10,467 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6030d470, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:47:10,467 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0450ab8807f5,35227,-1 for getting cluster id 2024-11-16T12:47:10,468 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T12:47:10,470 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6f84f2fd-d9b2-41cc-988c-9e76f5b3639a' 2024-11-16T12:47:10,470 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T12:47:10,471 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6f84f2fd-d9b2-41cc-988c-9e76f5b3639a" 2024-11-16T12:47:10,471 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39dd05c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:47:10,471 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0450ab8807f5,35227,-1] 2024-11-16T12:47:10,471 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T12:47:10,472 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:10,474 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38636, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T12:47:10,475 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ad9bbfc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:47:10,476 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T12:47:10,477 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0450ab8807f5,33749,1731761229240, seqNum=-1] 2024-11-16T12:47:10,477 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T12:47:10,479 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42298, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T12:47:10,481 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0450ab8807f5,35227,1731761229084 2024-11-16T12:47:10,481 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:10,484 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T12:47:10,502 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0450ab8807f5:0 server-side Connection retries=45 2024-11-16T12:47:10,503 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:10,503 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:10,503 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T12:47:10,503 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:10,503 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T12:47:10,503 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T12:47:10,503 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T12:47:10,504 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35663 2024-11-16T12:47:10,505 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35663 connecting to ZooKeeper ensemble=127.0.0.1:62408 2024-11-16T12:47:10,505 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:10,507 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:10,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:356630x0, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T12:47:10,542 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35663-0x10144f8dfd10002 connected 2024-11-16T12:47:10,542 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:35663-0x10144f8dfd10002, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-16T12:47:10,542 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-16T12:47:10,543 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T12:47:10,544 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
2024-11-16T12:47:10,545 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:35663-0x10144f8dfd10002, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T12:47:10,548 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35663-0x10144f8dfd10002, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T12:47:10,549 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35663 2024-11-16T12:47:10,550 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35663 2024-11-16T12:47:10,551 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35663 2024-11-16T12:47:10,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35663 2024-11-16T12:47:10,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35663 2024-11-16T12:47:10,554 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.HRegionServer(746): ClusterId : 6f84f2fd-d9b2-41cc-988c-9e76f5b3639a 2024-11-16T12:47:10,554 DEBUG [RS:1;0450ab8807f5:35663 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T12:47:10,565 DEBUG [RS:1;0450ab8807f5:35663 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T12:47:10,565 DEBUG [RS:1;0450ab8807f5:35663 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T12:47:10,575 DEBUG [RS:1;0450ab8807f5:35663 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T12:47:10,576 DEBUG [RS:1;0450ab8807f5:35663 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e5512b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0450ab8807f5/172.17.0.2:0 2024-11-16T12:47:10,589 DEBUG [RS:1;0450ab8807f5:35663 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;0450ab8807f5:35663 2024-11-16T12:47:10,589 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T12:47:10,589 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T12:47:10,589 DEBUG [RS:1;0450ab8807f5:35663 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T12:47:10,590 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.HRegionServer(2659): reportForDuty to master=0450ab8807f5,35227,1731761229084 with port=35663, startcode=1731761230502 2024-11-16T12:47:10,590 DEBUG [RS:1;0450ab8807f5:35663 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T12:47:10,592 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48533, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T12:47:10,592 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35227 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0450ab8807f5,35663,1731761230502 2024-11-16T12:47:10,592 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35227 {}] master.ServerManager(517): Registering regionserver=0450ab8807f5,35663,1731761230502 2024-11-16T12:47:10,594 DEBUG [RS:1;0450ab8807f5:35663 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628 2024-11-16T12:47:10,594 DEBUG [RS:1;0450ab8807f5:35663 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39393 2024-11-16T12:47:10,594 DEBUG [RS:1;0450ab8807f5:35663 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T12:47:10,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T12:47:10,600 DEBUG [RS:1;0450ab8807f5:35663 {}] zookeeper.ZKUtil(111): regionserver:35663-0x10144f8dfd10002, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0450ab8807f5,35663,1731761230502 2024-11-16T12:47:10,600 WARN [RS:1;0450ab8807f5:35663 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T12:47:10,600 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0450ab8807f5,35663,1731761230502] 2024-11-16T12:47:10,600 INFO [RS:1;0450ab8807f5:35663 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:47:10,600 DEBUG [RS:1;0450ab8807f5:35663 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502 2024-11-16T12:47:10,603 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T12:47:10,605 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T12:47:10,605 INFO [RS:1;0450ab8807f5:35663 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T12:47:10,605 INFO [RS:1;0450ab8807f5:35663 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-16T12:47:10,605 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T12:47:10,606 INFO [RS:1;0450ab8807f5:35663 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T12:47:10,606 INFO [RS:1;0450ab8807f5:35663 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:10,607 DEBUG [RS:1;0450ab8807f5:35663 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:10,607 DEBUG [RS:1;0450ab8807f5:35663 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:10,607 DEBUG [RS:1;0450ab8807f5:35663 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:10,607 DEBUG [RS:1;0450ab8807f5:35663 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:10,607 DEBUG [RS:1;0450ab8807f5:35663 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:10,607 DEBUG [RS:1;0450ab8807f5:35663 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0450ab8807f5:0, corePoolSize=2, maxPoolSize=2 2024-11-16T12:47:10,607 DEBUG [RS:1;0450ab8807f5:35663 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:10,608 DEBUG [RS:1;0450ab8807f5:35663 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:10,608 DEBUG [RS:1;0450ab8807f5:35663 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:10,608 DEBUG [RS:1;0450ab8807f5:35663 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:10,608 DEBUG [RS:1;0450ab8807f5:35663 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:10,608 DEBUG [RS:1;0450ab8807f5:35663 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:10,608 DEBUG [RS:1;0450ab8807f5:35663 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0450ab8807f5:0, corePoolSize=3, maxPoolSize=3 2024-11-16T12:47:10,608 DEBUG [RS:1;0450ab8807f5:35663 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0, corePoolSize=3, maxPoolSize=3 2024-11-16T12:47:10,609 INFO [RS:1;0450ab8807f5:35663 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-16T12:47:10,609 INFO [RS:1;0450ab8807f5:35663 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:10,609 INFO [RS:1;0450ab8807f5:35663 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:10,609 INFO [RS:1;0450ab8807f5:35663 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:10,609 INFO [RS:1;0450ab8807f5:35663 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:10,609 INFO [RS:1;0450ab8807f5:35663 {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,35663,1731761230502-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T12:47:10,624 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T12:47:10,624 INFO [RS:1;0450ab8807f5:35663 {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,35663,1731761230502-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:10,624 INFO [RS:1;0450ab8807f5:35663 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:10,624 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.Replication(171): 0450ab8807f5,35663,1731761230502 started 2024-11-16T12:47:10,636 INFO [RS:1;0450ab8807f5:35663 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:10,636 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.HRegionServer(1482): Serving as 0450ab8807f5,35663,1731761230502, RpcServer on 0450ab8807f5/172.17.0.2:35663, sessionid=0x10144f8dfd10002 2024-11-16T12:47:10,636 DEBUG [RS:1;0450ab8807f5:35663 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T12:47:10,637 DEBUG [RS:1;0450ab8807f5:35663 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0450ab8807f5,35663,1731761230502 2024-11-16T12:47:10,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;0450ab8807f5:35663,5,FailOnTimeoutGroup] 2024-11-16T12:47:10,637 DEBUG [RS:1;0450ab8807f5:35663 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0450ab8807f5,35663,1731761230502' 2024-11-16T12:47:10,637 DEBUG [RS:1;0450ab8807f5:35663 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T12:47:10,637 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-16T12:47:10,637 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T12:47:10,637 DEBUG [RS:1;0450ab8807f5:35663 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T12:47:10,638 DEBUG [RS:1;0450ab8807f5:35663 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T12:47:10,638 DEBUG [RS:1;0450ab8807f5:35663 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T12:47:10,638 DEBUG [RS:1;0450ab8807f5:35663 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
0450ab8807f5,35663,1731761230502 2024-11-16T12:47:10,638 DEBUG [RS:1;0450ab8807f5:35663 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0450ab8807f5,35663,1731761230502' 2024-11-16T12:47:10,638 DEBUG [RS:1;0450ab8807f5:35663 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T12:47:10,638 DEBUG [RS:1;0450ab8807f5:35663 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T12:47:10,638 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 0450ab8807f5,35227,1731761229084 2024-11-16T12:47:10,638 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3deaf92c 2024-11-16T12:47:10,639 DEBUG [RS:1;0450ab8807f5:35663 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T12:47:10,639 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T12:47:10,639 INFO [RS:1;0450ab8807f5:35663 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T12:47:10,639 INFO [RS:1;0450ab8807f5:35663 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T12:47:10,640 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38642, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T12:47:10,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35227 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T12:47:10,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35227 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-16T12:47:10,641 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T12:47:10,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35227 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T12:47:10,644 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T12:47:10,644 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:10,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-16T12:47:10,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T12:47:10,645 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T12:47:10,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34023 is added to blk_1073741835_1011 (size=393) 2024-11-16T12:47:10,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41753 is added to blk_1073741835_1011 (size=393) 2024-11-16T12:47:10,654 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d3649421ab101859b27c0f36943512c7, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628 2024-11-16T12:47:10,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34023 is added to blk_1073741836_1012 (size=76) 2024-11-16T12:47:10,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41753 is added to blk_1073741836_1012 (size=76) 2024-11-16T12:47:10,665 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:47:10,665 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing d3649421ab101859b27c0f36943512c7, disabling compactions & flushes 2024-11-16T12:47:10,665 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7. 2024-11-16T12:47:10,665 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7. 2024-11-16T12:47:10,665 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7. after waiting 0 ms 2024-11-16T12:47:10,665 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7. 2024-11-16T12:47:10,665 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7. 2024-11-16T12:47:10,665 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for d3649421ab101859b27c0f36943512c7: Waiting for close lock at 1731761230665Disabling compacts and flushes for region at 1731761230665Disabling writes for close at 1731761230665Writing region close event to WAL at 1731761230665Closed at 1731761230665 2024-11-16T12:47:10,667 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T12:47:10,667 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731761230667"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731761230667"}]},"ts":"1731761230667"} 2024-11-16T12:47:10,670 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-16T12:47:10,672 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T12:47:10,672 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731761230672"}]},"ts":"1731761230672"} 2024-11-16T12:47:10,675 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-16T12:47:10,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d3649421ab101859b27c0f36943512c7, ASSIGN}] 2024-11-16T12:47:10,677 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d3649421ab101859b27c0f36943512c7, ASSIGN 2024-11-16T12:47:10,679 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d3649421ab101859b27c0f36943512c7, ASSIGN; state=OFFLINE, location=0450ab8807f5,33749,1731761229240; forceNewPlan=false, retain=false 2024-11-16T12:47:10,742 INFO [RS:1;0450ab8807f5:35663 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C35663%2C1731761230502, suffix=, logDir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502, archiveDir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/oldWALs, maxLogs=32 2024-11-16T12:47:10,743 INFO [RS:1;0450ab8807f5:35663 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C35663%2C1731761230502.1731761230742 2024-11-16T12:47:10,759 INFO [RS:1;0450ab8807f5:35663 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 2024-11-16T12:47:10,774 DEBUG [RS:1;0450ab8807f5:35663 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38523:38523),(127.0.0.1/127.0.0.1:33799:33799)] 2024-11-16T12:47:10,830 INFO [0450ab8807f5:35227 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-16T12:47:10,830 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d3649421ab101859b27c0f36943512c7, regionState=OPENING, regionLocation=0450ab8807f5,33749,1731761229240 2024-11-16T12:47:10,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d3649421ab101859b27c0f36943512c7, ASSIGN because future has completed 2024-11-16T12:47:10,843 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d3649421ab101859b27c0f36943512c7, server=0450ab8807f5,33749,1731761229240}] 2024-11-16T12:47:11,002 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7. 2024-11-16T12:47:11,002 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d3649421ab101859b27c0f36943512c7, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7.', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:47:11,002 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:11,003 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:47:11,003 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:11,003 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:11,004 INFO [StoreOpener-d3649421ab101859b27c0f36943512c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:11,006 INFO [StoreOpener-d3649421ab101859b27c0f36943512c7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d3649421ab101859b27c0f36943512c7 columnFamilyName info 2024-11-16T12:47:11,006 DEBUG [StoreOpener-d3649421ab101859b27c0f36943512c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:11,006 INFO [StoreOpener-d3649421ab101859b27c0f36943512c7-1 {}] regionserver.HStore(327): Store=d3649421ab101859b27c0f36943512c7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:47:11,007 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:11,007 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:11,008 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:11,008 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:11,008 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:11,010 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:11,012 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:47:11,012 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d3649421ab101859b27c0f36943512c7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=882664, jitterRate=0.12236566841602325}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T12:47:11,012 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:11,013 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d3649421ab101859b27c0f36943512c7: Running coprocessor pre-open hook at 1731761231003Writing region info on filesystem at 1731761231003Initializing all the Stores at 1731761231004 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761231004Cleaning up temporary data from old regions at 1731761231008 (+4 ms)Running coprocessor post-open hooks at 1731761231012 (+4 ms)Region opened successfully at 1731761231013 (+1 ms) 2024-11-16T12:47:11,014 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7., pid=6, masterSystemTime=1731761230997 2024-11-16T12:47:11,016 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7. 2024-11-16T12:47:11,017 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7. 2024-11-16T12:47:11,017 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d3649421ab101859b27c0f36943512c7, regionState=OPEN, openSeqNum=2, regionLocation=0450ab8807f5,33749,1731761229240 2024-11-16T12:47:11,020 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d3649421ab101859b27c0f36943512c7, server=0450ab8807f5,33749,1731761229240 because future has completed 2024-11-16T12:47:11,024 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T12:47:11,024 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d3649421ab101859b27c0f36943512c7, server=0450ab8807f5,33749,1731761229240 in 179 msec 2024-11-16T12:47:11,027 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T12:47:11,027 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d3649421ab101859b27c0f36943512c7, ASSIGN in 348 msec 2024-11-16T12:47:11,029 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T12:47:11,029 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731761231029"}]},"ts":"1731761231029"} 2024-11-16T12:47:11,031 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-16T12:47:11,033 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T12:47:11,035 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 392 msec 2024-11-16T12:47:15,804 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T12:47:15,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:15,828 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:15,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:15,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:15,837 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-16T12:47:16,950 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T12:47:16,950 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T12:47:16,951 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T12:47:16,951 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-16T12:47:16,951 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T12:47:16,951 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T12:47:20,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T12:47:20,727 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-16T12:47:20,727 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-16T12:47:20,735 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T12:47:20,735 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7. 2024-11-16T12:47:20,811 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:47:20,814 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:47:20,815 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:47:20,815 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:47:20,815 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T12:47:20,816 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ccc1bc4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:47:20,816 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5580c33e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:47:20,910 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76fb109e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/java.io.tmpdir/jetty-localhost-39853-hadoop-hdfs-3_4_1-tests_jar-_-any-7763540095087427410/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:20,911 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@624ef820{HTTP/1.1, (http/1.1)}{localhost:39853} 2024-11-16T12:47:20,911 INFO [Time-limited test {}] server.Server(415): Started @119798ms 2024-11-16T12:47:20,912 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:47:20,945 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:47:20,949 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:47:20,950 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:47:20,950 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:47:20,950 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T12:47:20,951 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3247fd57{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:47:20,951 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@178f342a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:47:21,051 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@79f7fcc4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/java.io.tmpdir/jetty-localhost-37409-hadoop-hdfs-3_4_1-tests_jar-_-any-2091581272558166352/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:21,052 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5fb4bc9e{HTTP/1.1, (http/1.1)}{localhost:37409} 2024-11-16T12:47:21,052 INFO [Time-limited test {}] server.Server(415): Started @119939ms 2024-11-16T12:47:21,053 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:47:21,082 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:47:21,085 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:47:21,086 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:47:21,086 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:47:21,086 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T12:47:21,087 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@633469fc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:47:21,087 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3740407e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:47:21,181 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7683b54c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/java.io.tmpdir/jetty-localhost-38905-hadoop-hdfs-3_4_1-tests_jar-_-any-244994032406174269/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:21,181 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1191c470{HTTP/1.1, (http/1.1)}{localhost:38905} 2024-11-16T12:47:21,181 INFO [Time-limited test {}] server.Server(415): Started @120068ms 2024-11-16T12:47:21,182 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:47:21,692 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data5/current/BP-886867469-172.17.0.2-1731761227469/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:21,692 WARN [Thread-866 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data6/current/BP-886867469-172.17.0.2-1731761227469/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:21,709 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T12:47:21,712 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14911b6a669dbb86 with lease ID 0x92d8eda59640438a: Processing first storage report for DS-2055084b-7869-4d0c-b186-25cbf66349fb from datanode DatanodeRegistration(127.0.0.1:37937, datanodeUuid=731de3a5-c577-4d82-9c1a-c69a453fb594, infoPort=39937, infoSecurePort=0, ipcPort=41167, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469) 2024-11-16T12:47:21,712 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14911b6a669dbb86 with lease ID 0x92d8eda59640438a: from storage DS-2055084b-7869-4d0c-b186-25cbf66349fb node DatanodeRegistration(127.0.0.1:37937, datanodeUuid=731de3a5-c577-4d82-9c1a-c69a453fb594, infoPort=39937, infoSecurePort=0, ipcPort=41167, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:21,712 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14911b6a669dbb86 with lease ID 0x92d8eda59640438a: Processing first storage report for DS-62bb8653-d694-483f-9b1c-7a424323a9df from datanode DatanodeRegistration(127.0.0.1:37937, datanodeUuid=731de3a5-c577-4d82-9c1a-c69a453fb594, infoPort=39937, infoSecurePort=0, ipcPort=41167, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469) 2024-11-16T12:47:21,712 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14911b6a669dbb86 with lease ID 0x92d8eda59640438a: from storage DS-62bb8653-d694-483f-9b1c-7a424323a9df node DatanodeRegistration(127.0.0.1:37937, datanodeUuid=731de3a5-c577-4d82-9c1a-c69a453fb594, infoPort=39937, infoSecurePort=0, ipcPort=41167, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:21,970 WARN [Thread-878 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data7/current/BP-886867469-172.17.0.2-1731761227469/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:21,970 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data8/current/BP-886867469-172.17.0.2-1731761227469/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:21,989 WARN [Thread-830 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T12:47:21,992 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x842660f0b95b5baf with lease ID 0x92d8eda59640438b: Processing first storage report for DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3 from datanode DatanodeRegistration(127.0.0.1:46657, datanodeUuid=c07e50c5-97c3-4ced-b66a-aa7258388f1b, infoPort=34707, infoSecurePort=0, ipcPort=34013, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469) 2024-11-16T12:47:21,992 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x842660f0b95b5baf with lease ID 0x92d8eda59640438b: from storage DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3 node DatanodeRegistration(127.0.0.1:46657, datanodeUuid=c07e50c5-97c3-4ced-b66a-aa7258388f1b, infoPort=34707, infoSecurePort=0, ipcPort=34013, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:21,992 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x842660f0b95b5baf with lease ID 0x92d8eda59640438b: Processing first storage report for DS-f28707cb-05de-4af0-a913-4e54ef1e9cdd from datanode DatanodeRegistration(127.0.0.1:46657, datanodeUuid=c07e50c5-97c3-4ced-b66a-aa7258388f1b, infoPort=34707, infoSecurePort=0, ipcPort=34013, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469) 2024-11-16T12:47:21,992 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x842660f0b95b5baf with lease ID 0x92d8eda59640438b: from storage DS-f28707cb-05de-4af0-a913-4e54ef1e9cdd node DatanodeRegistration(127.0.0.1:46657, datanodeUuid=c07e50c5-97c3-4ced-b66a-aa7258388f1b, infoPort=34707, infoSecurePort=0, ipcPort=34013, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:22,009 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data9/current/BP-886867469-172.17.0.2-1731761227469/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:22,009 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data10/current/BP-886867469-172.17.0.2-1731761227469/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:22,029 WARN [Thread-852 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T12:47:22,032 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x62d1b9143b7dc24d with lease ID 0x92d8eda59640438c: Processing first storage report for DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0 from datanode DatanodeRegistration(127.0.0.1:39047, datanodeUuid=dff4dd73-5130-47d8-ba40-d3b4d30d042b, infoPort=42393, infoSecurePort=0, ipcPort=43771, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469) 2024-11-16T12:47:22,032 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x62d1b9143b7dc24d with lease ID 0x92d8eda59640438c: from storage DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0 node DatanodeRegistration(127.0.0.1:39047, datanodeUuid=dff4dd73-5130-47d8-ba40-d3b4d30d042b, infoPort=42393, infoSecurePort=0, ipcPort=43771, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:22,032 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x62d1b9143b7dc24d with lease ID 0x92d8eda59640438c: Processing first storage report for DS-f018d334-41f1-4ff3-a980-05c9dd435976 from datanode DatanodeRegistration(127.0.0.1:39047, datanodeUuid=dff4dd73-5130-47d8-ba40-d3b4d30d042b, infoPort=42393, infoSecurePort=0, ipcPort=43771, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469) 2024-11-16T12:47:22,032 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x62d1b9143b7dc24d with lease ID 0x92d8eda59640438c: from storage DS-f018d334-41f1-4ff3-a980-05c9dd435976 node DatanodeRegistration(127.0.0.1:39047, datanodeUuid=dff4dd73-5130-47d8-ba40-d3b4d30d042b, infoPort=42393, infoSecurePort=0, ipcPort=43771, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:22,109 WARN [ResponseProcessor for block BP-886867469-172.17.0.2-1731761227469:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-886867469-172.17.0.2-1731761227469:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:22,109 WARN [ResponseProcessor for block BP-886867469-172.17.0.2-1731761227469:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-886867469-172.17.0.2-1731761227469:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:47:22,109 WARN [ResponseProcessor for block BP-886867469-172.17.0.2-1731761227469:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-886867469-172.17.0.2-1731761227469:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-886867469-172.17.0.2-1731761227469:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:22,109 WARN [ResponseProcessor for block BP-886867469-172.17.0.2-1731761227469:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-886867469-172.17.0.2-1731761227469:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:22,110 WARN [DataStreamer for file /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/WALs/0450ab8807f5,35227,1731761229084/0450ab8807f5%2C35227%2C1731761229084.1731761229377 block BP-886867469-172.17.0.2-1731761227469:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK], DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]) is bad. 2024-11-16T12:47:22,110 WARN [DataStreamer for file /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761229866 block BP-886867469-172.17.0.2-1731761227469:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK], DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]) is bad. 2024-11-16T12:47:22,111 WARN [DataStreamer for file /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 block BP-886867469-172.17.0.2-1731761227469:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK], DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]) is bad. 
2024-11-16T12:47:22,112 WARN [DataStreamer for file /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta block BP-886867469-172.17.0.2-1731761227469:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK], DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]) is bad. 2024-11-16T12:47:22,111 WARN [PacketResponder: BP-886867469-172.17.0.2-1731761227469:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41753] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:22,113 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:38502 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41753:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38502 dst: /127.0.0.1:41753 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:22,113 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_221165497_22 at /127.0.0.1:38480 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41753:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38480 dst: /127.0.0.1:41753 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:22,114 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:60586 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34023:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60586 dst: /127.0.0.1:34023 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:22,115 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:38512 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41753:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38512 dst: /127.0.0.1:41753 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:22,115 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:60598 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34023:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60598 dst: /127.0.0.1:34023 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:22,116 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2090494033_22 at /127.0.0.1:60622 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:34023:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60622 dst: /127.0.0.1:34023 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:22,116 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2090494033_22 at /127.0.0.1:38542 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:41753:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38542 dst: /127.0.0.1:41753 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T12:47:22,117 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_221165497_22 at /127.0.0.1:60548 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34023:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60548 dst: /127.0.0.1:34023 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:22,118 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@781791f2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:22,118 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2273073a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:47:22,119 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:47:22,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f988831{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:47:22,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@175f7b69{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.log.dir/,STOPPED} 2024-11-16T12:47:22,120 WARN [BP-886867469-172.17.0.2-1731761227469 heartbeating to localhost/127.0.0.1:39393 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:47:22,120 WARN [BP-886867469-172.17.0.2-1731761227469 heartbeating to localhost/127.0.0.1:39393 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-886867469-172.17.0.2-1731761227469 (Datanode Uuid fc9bb905-c3c0-4f61-a035-12b1008d0723) service to localhost/127.0.0.1:39393 2024-11-16T12:47:22,120 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T12:47:22,120 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:47:22,121 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data3/current/BP-886867469-172.17.0.2-1731761227469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:22,121 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data4/current/BP-886867469-172.17.0.2-1731761227469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:22,121 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:47:22,122 WARN [DataStreamer for file /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761229866 block BP-886867469-172.17.0.2-1731761227469:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:22,122 WARN [DataStreamer for file /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 block BP-886867469-172.17.0.2-1731761227469:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:22,122 WARN [DataStreamer for file /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta block BP-886867469-172.17.0.2-1731761227469:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:47:22,127 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@705583c1 {}] datanode.DataXceiver(331): 127.0.0.1:34023:DataXceiver error processing unknown operation src: /127.0.0.1:36180 dst: /127.0.0.1:34023 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:22,127 WARN [DataStreamer for file /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/WALs/0450ab8807f5,35227,1731761229084/0450ab8807f5%2C35227%2C1731761229084.1731761229377 block BP-886867469-172.17.0.2-1731761227469:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:47:22,136 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@461a42e8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:22,136 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5289966b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:47:22,136 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:47:22,137 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35026af9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:47:22,137 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@131ba134{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.log.dir/,STOPPED} 2024-11-16T12:47:22,138 WARN [BP-886867469-172.17.0.2-1731761227469 heartbeating to localhost/127.0.0.1:39393 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:47:22,138 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T12:47:22,138 WARN [BP-886867469-172.17.0.2-1731761227469 heartbeating to localhost/127.0.0.1:39393 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-886867469-172.17.0.2-1731761227469 (Datanode Uuid 92e12824-0524-4115-9c7f-83686a2d39fd) service to localhost/127.0.0.1:39393 2024-11-16T12:47:22,138 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:47:22,139 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data1/current/BP-886867469-172.17.0.2-1731761227469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:22,139 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data2/current/BP-886867469-172.17.0.2-1731761227469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:22,139 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:47:22,143 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7., hostname=0450ab8807f5,33749,1731761229240, seqNum=2] 2024-11-16T12:47:22,145 ERROR [FSHLog-0-hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628-prefix:0450ab8807f5,33749,1731761229240 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:22,145 WARN [FSHLog-0-hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628-prefix:0450ab8807f5,33749,1731761229240 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:22,146 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0450ab8807f5%2C33749%2C1731761229240:(num 1731761229866) roll requested 2024-11-16T12:47:22,146 INFO [regionserver/0450ab8807f5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33749%2C1731761229240.1731761242146 2024-11-16T12:47:22,162 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:22,162 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:22,162 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:22,162 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:22,163 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:22,163 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761229866 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761242146 2024-11-16T12:47:22,167 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:22,167 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:22,169 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-16T12:47:22,169 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-16T12:47:22,169 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761229866 2024-11-16T12:47:22,173 WARN [IPC Server handler 2 on default port 39393 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761229866 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-16T12:47:22,177 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761229866 after 6ms 2024-11-16T12:47:22,183 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42393:42393),(127.0.0.1/127.0.0.1:39937:39937)] 2024-11-16T12:47:22,183 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761229866 is not closed yet, will try archiving it next time 2024-11-16T12:47:22,610 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:23,356 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:24,183 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:24,185 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761242146 2024-11-16T12:47:24,186 WARN [ResponseProcessor for block BP-886867469-172.17.0.2-1731761227469:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-886867469-172.17.0.2-1731761227469:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:24,186 WARN [DataStreamer for file /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761242146 block BP-886867469-172.17.0.2-1731761227469:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK], DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]) is bad. 
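The repeated "All datanodes [...] are bad. Aborting..." traces above are the HDFS client giving up on the old WAL's write pipeline once the only datanode still holding a replica has been stopped. Whether the DFSClient tries to swap a replacement datanode into a live append/recovery pipeline is controlled by the dfs.client.block.write.replace-datanode-on-failure.* client settings; a minimal sketch of setting them on a client Configuration (the values shown are illustrative, not necessarily what this test run used):

    import org.apache.hadoop.conf.Configuration;

    public final class PipelineFailoverSettings {
        /** Client Configuration that asks for a replacement datanode on pipeline failure. */
        public static Configuration relaxedPipelineRecovery() {
            Configuration conf = new Configuration();
            // Allow the client to request a replacement datanode when a pipeline member fails.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            // DEFAULT only replaces nodes for wider pipelines; ALWAYS and NEVER are the other policies.
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            // If no replacement can be found, keep writing with the survivors instead of aborting.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
            return conf;
        }
    }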
2024-11-16T12:47:24,187 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:42630 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:39047:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42630 dst: /127.0.0.1:39047 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:24,187 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:60554 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:37937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60554 dst: /127.0.0.1:37937 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:24,225 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7683b54c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:24,226 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1191c470{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:47:24,226 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:47:24,226 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3740407e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:47:24,226 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@633469fc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.log.dir/,STOPPED} 2024-11-16T12:47:24,228 WARN [BP-886867469-172.17.0.2-1731761227469 heartbeating to localhost/127.0.0.1:39393 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:47:24,228 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T12:47:24,228 WARN [BP-886867469-172.17.0.2-1731761227469 heartbeating to localhost/127.0.0.1:39393 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-886867469-172.17.0.2-1731761227469 (Datanode Uuid dff4dd73-5130-47d8-ba40-d3b4d30d042b) service to localhost/127.0.0.1:39393 2024-11-16T12:47:24,228 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:47:24,228 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data9/current/BP-886867469-172.17.0.2-1731761227469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:24,228 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data10/current/BP-886867469-172.17.0.2-1731761227469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:24,228 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:47:24,610 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:25,356 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:47:26,179 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761229866 after 4010ms 2024-11-16T12:47:26,184 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:26,185 WARN [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]] 2024-11-16T12:47:26,185 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0450ab8807f5%2C33749%2C1731761229240:(num 1731761242146) roll requested 2024-11-16T12:47:26,185 INFO [regionserver/0450ab8807f5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33749%2C1731761229240.1731761246185 2024-11-16T12:47:26,191 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:26,192 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK], DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]) is bad. 
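Before the old WAL file can be archived, the Close-WAL-Writer thread above asks the NameNode to recover its lease and polls until the file is reported closed (attempt=0 after 6ms, then attempt=1 after 4010ms). The same call is available on the public HDFS client API; a minimal sketch, assuming the NameNode address and WAL path taken from this log:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class WalLeaseRecovery {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // DistributedFileSystem implements LeaseRecoverable, which is what
            // RecoverLeaseFSUtils detected above.
            DistributedFileSystem dfs =
                (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:39393"), conf);
            Path wal = new Path("/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/"
                + "0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761229866");
            // recoverLease returns true once the NameNode has closed the file; poll with a backoff,
            // which is essentially what RecoverLeaseFSUtils does.
            while (!dfs.recoverLease(wal)) {
                Thread.sleep(4000L);
            }
        }
    }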
2024-11-16T12:47:26,192 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741839_1021 2024-11-16T12:47:26,197 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK] 2024-11-16T12:47:26,202 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39047 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:26,202 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40114 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data8]'}, localName='127.0.0.1:46657', datanodeUuid='c07e50c5-97c3-4ced-b66a-aa7258388f1b', xmitsInProgress=0}:Exception transferring block BP-886867469-172.17.0.2-1731761227469:blk_1073741840_1022 to mirror 127.0.0.1:39047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:26,202 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK], DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]) is bad. 
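The roll requested just before this ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL.") is FSHLog's low-replication check: when the current WAL pipeline drops below the tolerable replication, the writer is closed and a new file is opened on whatever healthy datanodes remain. A hedged configuration sketch, assuming the FSHLog/LogRoller keys present in recent HBase releases (worth verifying against the version actually in use):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class WalRollTuning {
        public static Configuration walRollOnLowReplication() {
            Configuration conf = HBaseConfiguration.create();
            // Minimum pipeline replication FSHLog tolerates before requesting a roll
            // (defaults to the filesystem's default replication).
            conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
            // Limit on consecutive rolls requested because of low replication.
            conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
            // How many consecutive close/roll errors the log roller tolerates before aborting.
            conf.setInt("hbase.regionserver.logroll.errors.tolerated", 2);
            return conf;
        }
    }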
2024-11-16T12:47:26,202 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741840_1022 2024-11-16T12:47:26,202 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40114 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T12:47:26,202 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40114 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:46657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40114 dst: /127.0.0.1:46657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:26,203 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK] 2024-11-16T12:47:26,204 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41753 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:47:26,204 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:60570 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data6]'}, localName='127.0.0.1:37937', datanodeUuid='731de3a5-c577-4d82-9c1a-c69a453fb594', xmitsInProgress=0}:Exception transferring block BP-886867469-172.17.0.2-1731761227469:blk_1073741841_1023 to mirror 127.0.0.1:41753 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:26,205 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK], DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]) is bad. 2024-11-16T12:47:26,205 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741841_1023 2024-11-16T12:47:26,205 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:60570 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T12:47:26,205 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:60570 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:37937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60570 dst: /127.0.0.1:37937 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:26,205 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK] 2024-11-16T12:47:26,210 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:26,210 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:26,210 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:26,210 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:26,210 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:26,210 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761242146 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761246185 2024-11-16T12:47:26,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37937 is added to blk_1073741838_1020 (size=2431) 2024-11-16T12:47:26,214 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39937:39937),(127.0.0.1/127.0.0.1:34707:34707)] 2024-11-16T12:47:26,214 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761229866 is not closed yet, will try archiving it next time 2024-11-16T12:47:26,214 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761242146 is not closed yet, will try archiving it next time 2024-11-16T12:47:26,233 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T12:47:26,611 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:26,615 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761229866 is not closed yet, will try archiving it next time 2024-11-16T12:47:27,357 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:28,215 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:28,237 WARN [ResponseProcessor for block BP-886867469-172.17.0.2-1731761227469:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-886867469-172.17.0.2-1731761227469:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:28,237 WARN [DataStreamer for file /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761246185 block BP-886867469-172.17.0.2-1731761227469:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK], DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]) is bad. 
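Above, the region server's log roller replaces the writer as soon as a pipeline can be built from the surviving datanodes (Rolled WAL ... with entries=2, filesize=2.37 KB) and keeps the older files queued for archiving. A roll of a specific server's WAL can also be requested externally through the Admin API; a small sketch, reusing the server name that appears throughout this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class RollWalExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Host, port and start code taken from the log: 0450ab8807f5,33749,1731761229240
                ServerName rs = ServerName.valueOf("0450ab8807f5", 33749, 1731761229240L);
                admin.rollWALWriter(rs); // ask that region server to roll its WAL now
            }
        }
    }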
2024-11-16T12:47:28,238 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40118 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:46657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40118 dst: /127.0.0.1:46657 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:28,238 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:60572 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:37937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60572 dst: /127.0.0.1:37937 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:28,275 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76fb109e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:28,275 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@624ef820{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:47:28,275 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:47:28,275 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5580c33e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:47:28,275 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ccc1bc4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.log.dir/,STOPPED} 2024-11-16T12:47:28,276 WARN [BP-886867469-172.17.0.2-1731761227469 heartbeating to localhost/127.0.0.1:39393 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:47:28,276 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T12:47:28,276 WARN [BP-886867469-172.17.0.2-1731761227469 heartbeating to localhost/127.0.0.1:39393 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-886867469-172.17.0.2-1731761227469 (Datanode Uuid 731de3a5-c577-4d82-9c1a-c69a453fb594) service to localhost/127.0.0.1:39393 2024-11-16T12:47:28,276 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:47:28,277 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data5/current/BP-886867469-172.17.0.2-1731761227469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:28,277 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data6/current/BP-886867469-172.17.0.2-1731761227469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:28,277 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:47:28,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33749 {}] regionserver.HRegion(8855): Flush requested on d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:28,289 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d3649421ab101859b27c0f36943512c7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T12:47:28,310 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/d62a6b3f21c542f7be6c35e45cb60f73 is 1080, key is row0002/info:/1731761244230/Put/seqid=0 2024-11-16T12:47:28,312 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
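The MemStoreFlusher entries above show region d3649421ab101859b27c0f36943512c7 flushing roughly 7.36 KB of the 'info' family to a temporary HFile, with the first block writes failing because the datanodes picked for the pipeline are already down. In the test the flush is triggered by write volume, but the same flush can be forced through the Admin API; a sketch using the table name from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
                admin.flush(table); // ask the region servers to flush every region of the table
            }
        }
    }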
2024-11-16T12:47:28,312 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK], DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]) is bad. 2024-11-16T12:47:28,312 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741843_1026 2024-11-16T12:47:28,313 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK] 2024-11-16T12:47:28,315 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37937 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:28,315 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40134 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data8]'}, localName='127.0.0.1:46657', datanodeUuid='c07e50c5-97c3-4ced-b66a-aa7258388f1b', xmitsInProgress=0}:Exception transferring block BP-886867469-172.17.0.2-1731761227469:blk_1073741844_1027 to mirror 127.0.0.1:37937 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T12:47:28,316 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK], DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]) is bad. 2024-11-16T12:47:28,316 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741844_1027 2024-11-16T12:47:28,316 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40134 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T12:47:28,316 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40134 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:46657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40134 dst: /127.0.0.1:46657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:28,316 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK] 2024-11-16T12:47:28,318 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:47:28,318 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK], DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]) is bad. 2024-11-16T12:47:28,318 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741845_1028 2024-11-16T12:47:28,319 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK] 2024-11-16T12:47:28,321 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39047 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:28,321 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40150 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data8]'}, localName='127.0.0.1:46657', datanodeUuid='c07e50c5-97c3-4ced-b66a-aa7258388f1b', xmitsInProgress=0}:Exception transferring block BP-886867469-172.17.0.2-1731761227469:blk_1073741846_1029 to mirror 127.0.0.1:39047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T12:47:28,322 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK], DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]) is bad. 2024-11-16T12:47:28,322 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40150 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T12:47:28,322 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741846_1029 2024-11-16T12:47:28,322 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40150 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:46657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40150 dst: /127.0.0.1:46657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T12:47:28,322 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK] 2024-11-16T12:47:28,323 WARN [IPC Server handler 2 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T12:47:28,323 WARN [IPC Server handler 2 on default port 39393 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T12:47:28,324 WARN [IPC Server handler 2 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T12:47:28,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741847_1030 (size=10347) 2024-11-16T12:47:28,611 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:47:28,728 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/d62a6b3f21c542f7be6c35e45cb60f73 2024-11-16T12:47:28,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/d62a6b3f21c542f7be6c35e45cb60f73 as hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/d62a6b3f21c542f7be6c35e45cb60f73 2024-11-16T12:47:28,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/d62a6b3f21c542f7be6c35e45cb60f73, entries=5, sequenceid=11, filesize=10.1 K 2024-11-16T12:47:28,750 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for d3649421ab101859b27c0f36943512c7 in 462ms, sequenceid=11, compaction requested=false 2024-11-16T12:47:28,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d3649421ab101859b27c0f36943512c7: 2024-11-16T12:47:28,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33749 {}] regionserver.HRegion(8855): Flush requested on d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:28,916 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d3649421ab101859b27c0f36943512c7 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-16T12:47:28,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/57155b9bd4324e5393d4f5a5eeec47af is 1080, key is row0007/info:/1731761248290/Put/seqid=0 2024-11-16T12:47:28,926 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:47:28,927 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK], DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]) is bad. 2024-11-16T12:47:28,927 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741848_1031 2024-11-16T12:47:28,927 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK] 2024-11-16T12:47:28,928 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:28,928 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK], DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]) is bad. 2024-11-16T12:47:28,929 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741849_1032 2024-11-16T12:47:28,929 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK] 2024-11-16T12:47:28,931 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37937 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:47:28,931 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40172 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data8]'}, localName='127.0.0.1:46657', datanodeUuid='c07e50c5-97c3-4ced-b66a-aa7258388f1b', xmitsInProgress=0}:Exception transferring block BP-886867469-172.17.0.2-1731761227469:blk_1073741850_1033 to mirror 127.0.0.1:37937 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:28,931 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK], DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]) is bad. 2024-11-16T12:47:28,931 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741850_1033 2024-11-16T12:47:28,931 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40172 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T12:47:28,931 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40172 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:46657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40172 dst: /127.0.0.1:46657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:28,932 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK] 2024-11-16T12:47:28,934 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34023 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:28,934 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40182 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data8]'}, localName='127.0.0.1:46657', datanodeUuid='c07e50c5-97c3-4ced-b66a-aa7258388f1b', xmitsInProgress=0}:Exception transferring block BP-886867469-172.17.0.2-1731761227469:blk_1073741851_1034 to mirror 127.0.0.1:34023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T12:47:28,934 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK], DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]) is bad. 2024-11-16T12:47:28,934 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741851_1034 2024-11-16T12:47:28,934 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40182 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T12:47:28,934 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40182 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:46657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40182 dst: /127.0.0.1:46657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T12:47:28,935 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK] 2024-11-16T12:47:28,935 WARN [IPC Server handler 2 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T12:47:28,935 WARN [IPC Server handler 2 on default port 39393 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T12:47:28,936 WARN [IPC Server handler 2 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T12:47:28,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741852_1035 (size=12506) 2024-11-16T12:47:29,340 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/57155b9bd4324e5393d4f5a5eeec47af 2024-11-16T12:47:29,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/57155b9bd4324e5393d4f5a5eeec47af as hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/57155b9bd4324e5393d4f5a5eeec47af 2024-11-16T12:47:29,352 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/57155b9bd4324e5393d4f5a5eeec47af, entries=7, sequenceid=24, filesize=12.2 K 2024-11-16T12:47:29,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for d3649421ab101859b27c0f36943512c7 in 437ms, sequenceid=24, compaction requested=false 2024-11-16T12:47:29,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d3649421ab101859b27c0f36943512c7: 2024-11-16T12:47:29,354 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-16T12:47:29,354 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:47:29,354 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/57155b9bd4324e5393d4f5a5eeec47af because midkey is the same as first or last row 2024-11-16T12:47:29,357 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:30,010 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6a667fd4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46657, datanodeUuid=c07e50c5-97c3-4ced-b66a-aa7258388f1b, infoPort=34707, infoSecurePort=0, ipcPort=34013, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469):Failed to transfer BP-886867469-172.17.0.2-1731761227469:blk_1073741847_1030 to 127.0.0.1:39047 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:30,010 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2b42e84b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46657, datanodeUuid=c07e50c5-97c3-4ced-b66a-aa7258388f1b, infoPort=34707, infoSecurePort=0, ipcPort=34013, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469):Failed to transfer BP-886867469-172.17.0.2-1731761227469:blk_1073741852_1035 to 127.0.0.1:37937 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:30,215 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:30,215 WARN [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK]] 2024-11-16T12:47:30,215 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0450ab8807f5%2C33749%2C1731761229240:(num 1731761246185) roll requested 2024-11-16T12:47:30,216 INFO [regionserver/0450ab8807f5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33749%2C1731761229240.1731761250215 2024-11-16T12:47:30,218 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:30,218 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK], DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]) is bad. 
2024-11-16T12:47:30,218 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741853_1036 2024-11-16T12:47:30,219 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK] 2024-11-16T12:47:30,220 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:30,220 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK], DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]) is bad. 2024-11-16T12:47:30,220 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741854_1037 2024-11-16T12:47:30,221 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK] 2024-11-16T12:47:30,222 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:47:30,222 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK], DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]) is bad. 2024-11-16T12:47:30,222 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741855_1038 2024-11-16T12:47:30,223 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK] 2024-11-16T12:47:30,225 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39047 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:30,225 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40188 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data8]'}, localName='127.0.0.1:46657', datanodeUuid='c07e50c5-97c3-4ced-b66a-aa7258388f1b', xmitsInProgress=0}:Exception transferring block BP-886867469-172.17.0.2-1731761227469:blk_1073741856_1039 to mirror 127.0.0.1:39047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T12:47:30,225 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK], DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]) is bad. 2024-11-16T12:47:30,225 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741856_1039 2024-11-16T12:47:30,225 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40188 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T12:47:30,225 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40188 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:46657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40188 dst: /127.0.0.1:46657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T12:47:30,226 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK] 2024-11-16T12:47:30,226 WARN [IPC Server handler 4 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T12:47:30,226 WARN [IPC Server handler 4 on default port 39393 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T12:47:30,226 WARN [IPC Server handler 4 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T12:47:30,229 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:30,229 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:30,229 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:30,229 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:30,229 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:30,230 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761246185 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761250215 2024-11-16T12:47:30,230 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34707:34707)] 2024-11-16T12:47:30,230 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761229866 is not closed yet, will try archiving it next time 2024-11-16T12:47:30,230 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761246185 is not closed yet, will try archiving it next time 2024-11-16T12:47:30,231 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761242146 to 
hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/oldWALs/0450ab8807f5%2C33749%2C1731761229240.1731761242146 2024-11-16T12:47:30,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741842_1025 (size=25992) 2024-11-16T12:47:30,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33749 {}] regionserver.HRegion(8855): Flush requested on d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:30,348 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d3649421ab101859b27c0f36943512c7 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T12:47:30,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/8116c10d294f4b49a230a05e927820b3 is 1079, key is tmprow/info:/1731761250346/Put/seqid=0 2024-11-16T12:47:30,356 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:30,356 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK], DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]) is bad. 2024-11-16T12:47:30,356 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741858_1041 2024-11-16T12:47:30,357 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK] 2024-11-16T12:47:30,360 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34023 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:30,360 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40204 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data8]'}, localName='127.0.0.1:46657', datanodeUuid='c07e50c5-97c3-4ced-b66a-aa7258388f1b', xmitsInProgress=0}:Exception transferring block BP-886867469-172.17.0.2-1731761227469:blk_1073741859_1042 to mirror 127.0.0.1:34023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:30,360 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK], DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]) is bad. 2024-11-16T12:47:30,360 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741859_1042 2024-11-16T12:47:30,360 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40204 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T12:47:30,361 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40204 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:46657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40204 dst: /127.0.0.1:46657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:30,361 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK] 2024-11-16T12:47:30,363 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:30,363 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK], DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]) is bad. 2024-11-16T12:47:30,363 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741860_1043 2024-11-16T12:47:30,364 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK] 2024-11-16T12:47:30,367 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37937 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:30,367 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40210 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data8]'}, localName='127.0.0.1:46657', datanodeUuid='c07e50c5-97c3-4ced-b66a-aa7258388f1b', xmitsInProgress=0}:Exception transferring block BP-886867469-172.17.0.2-1731761227469:blk_1073741861_1044 to mirror 127.0.0.1:37937 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:30,367 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK], DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]) is bad. 2024-11-16T12:47:30,367 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741861_1044 2024-11-16T12:47:30,367 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40210 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T12:47:30,367 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40210 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:46657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40210 dst: /127.0.0.1:46657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:30,368 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK] 2024-11-16T12:47:30,369 WARN [IPC Server handler 3 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T12:47:30,369 WARN [IPC Server handler 3 on default port 39393 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T12:47:30,369 WARN [IPC Server handler 3 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T12:47:30,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741862_1045 (size=6027) 2024-11-16T12:47:30,611 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:47:30,632 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761229866 is not closed yet, will try archiving it next time 2024-11-16T12:47:30,775 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/8116c10d294f4b49a230a05e927820b3 2024-11-16T12:47:30,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/8116c10d294f4b49a230a05e927820b3 as hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/8116c10d294f4b49a230a05e927820b3 2024-11-16T12:47:30,798 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/8116c10d294f4b49a230a05e927820b3, entries=1, sequenceid=34, filesize=5.9 K 2024-11-16T12:47:30,799 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for d3649421ab101859b27c0f36943512c7 in 452ms, sequenceid=34, compaction requested=true 2024-11-16T12:47:30,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d3649421ab101859b27c0f36943512c7: 2024-11-16T12:47:30,799 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-16T12:47:30,799 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:47:30,799 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/57155b9bd4324e5393d4f5a5eeec47af because midkey is the same as first or last row 2024-11-16T12:47:30,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d3649421ab101859b27c0f36943512c7:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T12:47:30,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:47:30,800 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T12:47:30,801 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T12:47:30,801 DEBUG 
[RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.HStore(1541): d3649421ab101859b27c0f36943512c7/info is initiating minor compaction (all files) 2024-11-16T12:47:30,801 INFO [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d3649421ab101859b27c0f36943512c7/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7. 2024-11-16T12:47:30,802 INFO [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/d62a6b3f21c542f7be6c35e45cb60f73, hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/57155b9bd4324e5393d4f5a5eeec47af, hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/8116c10d294f4b49a230a05e927820b3] into tmpdir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp, totalSize=28.2 K 2024-11-16T12:47:30,802 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] compactions.Compactor(225): Compacting d62a6b3f21c542f7be6c35e45cb60f73, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731761244230 2024-11-16T12:47:30,803 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] compactions.Compactor(225): Compacting 57155b9bd4324e5393d4f5a5eeec47af, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731761248290 2024-11-16T12:47:30,803 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8116c10d294f4b49a230a05e927820b3, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731761250346 2024-11-16T12:47:30,818 INFO [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d3649421ab101859b27c0f36943512c7#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T12:47:30,819 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/deafd3bbe23e4ca482fe11e6d4dedd37 is 1080, key is row0002/info:/1731761244230/Put/seqid=0 2024-11-16T12:47:30,822 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41753 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:30,822 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40250 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data8]'}, localName='127.0.0.1:46657', datanodeUuid='c07e50c5-97c3-4ced-b66a-aa7258388f1b', xmitsInProgress=0}:Exception transferring block BP-886867469-172.17.0.2-1731761227469:blk_1073741863_1046 to mirror 127.0.0.1:41753 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:30,822 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK], DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]) is bad. 2024-11-16T12:47:30,822 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741863_1046 2024-11-16T12:47:30,822 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40250 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T12:47:30,822 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40250 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:46657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40250 dst: /127.0.0.1:46657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:30,823 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK] 2024-11-16T12:47:30,824 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:30,824 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK], DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]) is bad. 2024-11-16T12:47:30,824 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741864_1047 2024-11-16T12:47:30,825 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK] 2024-11-16T12:47:30,826 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:30,826 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK], DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]) is bad. 2024-11-16T12:47:30,826 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741865_1048 2024-11-16T12:47:30,826 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK] 2024-11-16T12:47:30,829 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37937 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:30,829 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40258 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data8]'}, localName='127.0.0.1:46657', datanodeUuid='c07e50c5-97c3-4ced-b66a-aa7258388f1b', xmitsInProgress=0}:Exception transferring block BP-886867469-172.17.0.2-1731761227469:blk_1073741866_1049 to mirror 127.0.0.1:37937 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:30,829 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK], DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]) is bad. 2024-11-16T12:47:30,829 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741866_1049 2024-11-16T12:47:30,829 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40258 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T12:47:30,829 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:40258 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:46657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40258 dst: /127.0.0.1:46657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T12:47:30,829 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK] 2024-11-16T12:47:30,830 WARN [IPC Server handler 2 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T12:47:30,830 WARN [IPC Server handler 2 on default port 39393 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T12:47:30,830 WARN [IPC Server handler 2 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T12:47:30,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741867_1050 (size=17994) 2024-11-16T12:47:31,248 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/deafd3bbe23e4ca482fe11e6d4dedd37 as hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/deafd3bbe23e4ca482fe11e6d4dedd37 2024-11-16T12:47:31,257 INFO [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d3649421ab101859b27c0f36943512c7/info of d3649421ab101859b27c0f36943512c7 into deafd3bbe23e4ca482fe11e6d4dedd37(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-16T12:47:31,257 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d3649421ab101859b27c0f36943512c7: 2024-11-16T12:47:31,257 INFO [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7., storeName=d3649421ab101859b27c0f36943512c7/info, priority=13, startTime=1731761250799; duration=0sec 2024-11-16T12:47:31,257 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T12:47:31,257 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:47:31,257 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/deafd3bbe23e4ca482fe11e6d4dedd37 because midkey is the same as first or last row 2024-11-16T12:47:31,257 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T12:47:31,257 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:47:31,258 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/deafd3bbe23e4ca482fe11e6d4dedd37 because midkey is the same as first or last row 2024-11-16T12:47:31,258 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T12:47:31,258 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:47:31,258 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/deafd3bbe23e4ca482fe11e6d4dedd37 because midkey is the same as first or last row 2024-11-16T12:47:31,258 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:47:31,258 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d3649421ab101859b27c0f36943512c7:info 2024-11-16T12:47:31,357 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:31,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33749 {}] regionserver.HRegion(8855): Flush requested on d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:31,773 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d3649421ab101859b27c0f36943512c7 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T12:47:31,783 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/aa7a90d88924455c8249257aa43a8515 is 1079, key is tmprow/info:/1731761251771/Put/seqid=0 2024-11-16T12:47:31,785 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:31,786 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK], DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]) is bad. 2024-11-16T12:47:31,786 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741868_1051 2024-11-16T12:47:31,787 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK] 2024-11-16T12:47:31,788 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:31,788 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK], DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]) is bad. 2024-11-16T12:47:31,788 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741869_1052 2024-11-16T12:47:31,789 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK] 2024-11-16T12:47:31,790 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:31,790 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK], DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]) is bad. 2024-11-16T12:47:31,790 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741870_1053 2024-11-16T12:47:31,791 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK] 2024-11-16T12:47:31,792 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:31,793 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK], DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]) is bad. 2024-11-16T12:47:31,793 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741871_1054 2024-11-16T12:47:31,793 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK] 2024-11-16T12:47:31,794 WARN [IPC Server handler 1 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T12:47:31,794 WARN [IPC Server handler 1 on default port 39393 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T12:47:31,794 WARN [IPC Server handler 1 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T12:47:31,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741872_1055 (size=6027) 2024-11-16T12:47:32,199 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/aa7a90d88924455c8249257aa43a8515 2024-11-16T12:47:32,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/aa7a90d88924455c8249257aa43a8515 as hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/aa7a90d88924455c8249257aa43a8515 2024-11-16T12:47:32,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/aa7a90d88924455c8249257aa43a8515, entries=1, sequenceid=45, filesize=5.9 K 2024-11-16T12:47:32,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for d3649421ab101859b27c0f36943512c7 in 448ms, sequenceid=45, compaction requested=false 2024-11-16T12:47:32,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d3649421ab101859b27c0f36943512c7: 2024-11-16T12:47:32,222 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-16T12:47:32,222 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:47:32,222 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/deafd3bbe23e4ca482fe11e6d4dedd37 because midkey is the same as first or last row 2024-11-16T12:47:32,231 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:32,231 WARN [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK]] 2024-11-16T12:47:32,231 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0450ab8807f5%2C33749%2C1731761229240:(num 1731761250215) roll requested 2024-11-16T12:47:32,231 INFO [regionserver/0450ab8807f5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33749%2C1731761229240.1731761252231 2024-11-16T12:47:32,234 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:32,234 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK], DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]) is bad. 2024-11-16T12:47:32,234 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741873_1056 2024-11-16T12:47:32,235 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK] 2024-11-16T12:47:32,237 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39047 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:47:32,237 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:35736 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data8]'}, localName='127.0.0.1:46657', datanodeUuid='c07e50c5-97c3-4ced-b66a-aa7258388f1b', xmitsInProgress=0}:Exception transferring block BP-886867469-172.17.0.2-1731761227469:blk_1073741874_1057 to mirror 127.0.0.1:39047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:32,237 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK], DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]) is bad. 2024-11-16T12:47:32,238 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741874_1057 2024-11-16T12:47:32,238 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:35736 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T12:47:32,238 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:35736 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:46657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35736 dst: /127.0.0.1:46657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:32,238 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK] 2024-11-16T12:47:32,240 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:32,240 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK], DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]) is bad. 2024-11-16T12:47:32,240 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741875_1058 2024-11-16T12:47:32,241 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK] 2024-11-16T12:47:32,243 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34023 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:47:32,243 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:35748 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data8]'}, localName='127.0.0.1:46657', datanodeUuid='c07e50c5-97c3-4ced-b66a-aa7258388f1b', xmitsInProgress=0}:Exception transferring block BP-886867469-172.17.0.2-1731761227469:blk_1073741876_1059 to mirror 127.0.0.1:34023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:32,244 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK], DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]) is bad. 2024-11-16T12:47:32,244 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:35748 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T12:47:32,244 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741876_1059 2024-11-16T12:47:32,244 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:35748 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:46657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35748 dst: /127.0.0.1:46657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:32,244 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK] 2024-11-16T12:47:32,245 WARN [IPC Server handler 1 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T12:47:32,245 WARN [IPC Server handler 1 on default port 39393 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T12:47:32,245 WARN [IPC Server handler 1 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T12:47:32,248 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:32,248 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:32,248 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:32,249 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:32,249 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:32,249 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761250215 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761252231 2024-11-16T12:47:32,250 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34707:34707)] 2024-11-16T12:47:32,250 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761229866 is not closed yet, will try archiving it next time 
2024-11-16T12:47:32,250 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761250215 is not closed yet, will try archiving it next time 2024-11-16T12:47:32,250 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761246185 to hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/oldWALs/0450ab8807f5%2C33749%2C1731761229240.1731761246185 2024-11-16T12:47:32,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741857_1040 (size=13591) 2024-11-16T12:47:32,612 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:32,652 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761229866 is not closed yet, will try archiving it next time 2024-11-16T12:47:32,998 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2b42e84b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46657, datanodeUuid=c07e50c5-97c3-4ced-b66a-aa7258388f1b, infoPort=34707, infoSecurePort=0, ipcPort=34013, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469):Failed to transfer BP-886867469-172.17.0.2-1731761227469:blk_1073741842_1025 to 127.0.0.1:37937 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
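
The recurring "All datanodes [...] are bad. Aborting..." failures mean the write pipeline is down to a single surviving replica and the client has given up replacing the dead node. A hedged sketch of the stock HDFS client settings that govern this behaviour; the property names are the standard dfs.client.block.write.replace-datanode-on-failure keys, and the values shown are illustrative rather than what this test actually configures:

    import org.apache.hadoop.conf.Configuration;

    public class PipelineRecoveryConf {
        public static Configuration build() {
            Configuration conf = new Configuration();
            // Let the DFS client try to swap a failed DataNode into an existing pipeline...
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            // ...and keep writing on a best-effort basis instead of aborting when no
            // replacement node can be found, which is the condition logged above.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
            return conf;
        }
    }
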
2024-11-16T12:47:32,998 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6a667fd4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46657, datanodeUuid=c07e50c5-97c3-4ced-b66a-aa7258388f1b, infoPort=34707, infoSecurePort=0, ipcPort=34013, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469):Failed to transfer BP-886867469-172.17.0.2-1731761227469:blk_1073741862_1045 to 127.0.0.1:39047 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:33,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33749 {}] regionserver.HRegion(8855): Flush requested on d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:33,204 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d3649421ab101859b27c0f36943512c7 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T12:47:33,209 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/a56d48fb175343a9a9a3aab3df98e30a is 1079, key is tmprow/info:/1731761253202/Put/seqid=0 2024-11-16T12:47:33,211 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:33,212 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK], DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]) is bad. 
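
The HFileWriterImpl line above prints the largest cell's key in HBase's row/family:qualifier/timestamp/type form, so "tmprow/info:/1731761253202/Put/seqid=0" is row "tmprow", family "info", an empty qualifier, the write timestamp, and a Put. A small sketch of the equivalent client-side mutation, with a hypothetical value:

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TmpRowPut {
        public static Put build() {
            long ts = 1731761253202L; // timestamp printed in the logged key
            // Row "tmprow", family "info", empty qualifier, as in the HFileWriterImpl line above.
            return new Put(Bytes.toBytes("tmprow"))
                .addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), ts, Bytes.toBytes("v")); // value is hypothetical
        }
    }
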
2024-11-16T12:47:33,212 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741878_1061 2024-11-16T12:47:33,212 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK] 2024-11-16T12:47:33,216 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41753 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:33,216 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:35764 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data8]'}, localName='127.0.0.1:46657', datanodeUuid='c07e50c5-97c3-4ced-b66a-aa7258388f1b', xmitsInProgress=0}:Exception transferring block BP-886867469-172.17.0.2-1731761227469:blk_1073741879_1062 to mirror 127.0.0.1:41753 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:33,216 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK], DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]) is bad. 
2024-11-16T12:47:33,216 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741879_1062 2024-11-16T12:47:33,216 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:35764 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T12:47:33,216 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:35764 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:46657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35764 dst: /127.0.0.1:46657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:33,217 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK] 2024-11-16T12:47:33,218 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:33,219 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK], DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]) is bad. 
2024-11-16T12:47:33,219 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741880_1063 2024-11-16T12:47:33,219 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK] 2024-11-16T12:47:33,222 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39047 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:33,222 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:35766 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741881_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data8]'}, localName='127.0.0.1:46657', datanodeUuid='c07e50c5-97c3-4ced-b66a-aa7258388f1b', xmitsInProgress=0}:Exception transferring block BP-886867469-172.17.0.2-1731761227469:blk_1073741881_1064 to mirror 127.0.0.1:39047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:33,222 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK], DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]) is bad. 
2024-11-16T12:47:33,222 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741881_1064 2024-11-16T12:47:33,222 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:35766 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741881_1064] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T12:47:33,223 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1287641734_22 at /127.0.0.1:35766 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741881_1064] {}] datanode.DataXceiver(331): 127.0.0.1:46657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35766 dst: /127.0.0.1:46657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
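
The WRITE_BLOCK errors above all come from the same call path: the first DataNode in the pipeline opens a socket to its mirror with NetUtils.connect, and a refused connection surfaces both as the DataXceiver ERROR on that node and as firstBadLink in the client's ack. A rough sketch of that single call; the address stands in for whichever mirror is being probed, and any dead port yields the same ConnectException:

    import java.net.InetSocketAddress;
    import java.net.Socket;
    import org.apache.hadoop.net.NetUtils;

    public class MirrorConnectProbe {
        public static void main(String[] args) throws Exception {
            Socket s = new Socket();
            // Same connect the DataXceiver stack traces above fail in; host/port are illustrative.
            NetUtils.connect(s, new InetSocketAddress("127.0.0.1", 41753), 60_000);
            s.close();
        }
    }
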
2024-11-16T12:47:33,223 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK] 2024-11-16T12:47:33,224 WARN [IPC Server handler 1 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T12:47:33,224 WARN [IPC Server handler 1 on default port 39393 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T12:47:33,224 WARN [IPC Server handler 1 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T12:47:33,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741882_1065 (size=6027) 2024-11-16T12:47:33,358 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:47:33,629 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/a56d48fb175343a9a9a3aab3df98e30a 2024-11-16T12:47:33,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/a56d48fb175343a9a9a3aab3df98e30a as hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/a56d48fb175343a9a9a3aab3df98e30a 2024-11-16T12:47:33,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/a56d48fb175343a9a9a3aab3df98e30a, entries=1, sequenceid=55, filesize=5.9 K 2024-11-16T12:47:33,647 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for d3649421ab101859b27c0f36943512c7 in 443ms, sequenceid=55, compaction requested=true 2024-11-16T12:47:33,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d3649421ab101859b27c0f36943512c7: 2024-11-16T12:47:33,647 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-16T12:47:33,647 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:47:33,647 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/deafd3bbe23e4ca482fe11e6d4dedd37 because midkey is the same as first or last row 2024-11-16T12:47:33,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d3649421ab101859b27c0f36943512c7:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T12:47:33,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:47:33,648 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T12:47:33,649 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T12:47:33,649 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.HStore(1541): d3649421ab101859b27c0f36943512c7/info is initiating minor compaction (all files) 2024-11-16T12:47:33,649 INFO [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
d3649421ab101859b27c0f36943512c7/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7. 2024-11-16T12:47:33,650 INFO [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/deafd3bbe23e4ca482fe11e6d4dedd37, hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/aa7a90d88924455c8249257aa43a8515, hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/a56d48fb175343a9a9a3aab3df98e30a] into tmpdir=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp, totalSize=29.3 K 2024-11-16T12:47:33,650 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] compactions.Compactor(225): Compacting deafd3bbe23e4ca482fe11e6d4dedd37, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731761244230 2024-11-16T12:47:33,651 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] compactions.Compactor(225): Compacting aa7a90d88924455c8249257aa43a8515, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731761251771 2024-11-16T12:47:33,651 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] compactions.Compactor(225): Compacting a56d48fb175343a9a9a3aab3df98e30a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731761253202 2024-11-16T12:47:33,671 INFO [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d3649421ab101859b27c0f36943512c7#info#compaction#24 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T12:47:33,672 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/c84097b57f774f2e891601a38610f616 is 1080, key is row0002/info:/1731761244230/Put/seqid=0 2024-11-16T12:47:33,674 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:33,674 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK], DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK]) is bad. 2024-11-16T12:47:33,674 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741883_1066 2024-11-16T12:47:33,675 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41753,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK] 2024-11-16T12:47:33,676 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:33,676 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK], DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]) is bad. 2024-11-16T12:47:33,676 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741884_1067 2024-11-16T12:47:33,677 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK] 2024-11-16T12:47:33,679 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:33,679 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK], DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]) is bad. 2024-11-16T12:47:33,679 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741885_1068 2024-11-16T12:47:33,680 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK] 2024-11-16T12:47:33,682 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:33,682 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK], DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]) is bad. 
2024-11-16T12:47:33,682 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741886_1069 2024-11-16T12:47:33,683 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK] 2024-11-16T12:47:33,684 WARN [IPC Server handler 1 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T12:47:33,684 WARN [IPC Server handler 1 on default port 39393 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T12:47:33,684 WARN [IPC Server handler 1 on default port 39393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T12:47:33,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741887_1070 (size=18097) 2024-11-16T12:47:33,695 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/c84097b57f774f2e891601a38610f616 as hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/c84097b57f774f2e891601a38610f616 2024-11-16T12:47:33,703 INFO [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d3649421ab101859b27c0f36943512c7/info of d3649421ab101859b27c0f36943512c7 into c84097b57f774f2e891601a38610f616(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
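
For the compaction just committed, the sizes line up with the split checks before and after it: 17.6 K + 5.9 K + 5.9 K ≈ 29.3 K going in (totalSize=30048 bytes), 17.7 K (the 18097-byte block above) coming out, both above the logged sizeToCheck of 16.0 K. A simplified sketch of that size test, not HBase's actual split-policy class:

    public class SplitSizeCheck {
        // Simplified form of the "Should split because region size is big enough" test
        // seen in the log; the real logic lives in ConstantSizeRegionSplitPolicy.
        static boolean shouldSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
            return sumStoreSizeBytes > sizeToCheckBytes;
        }

        public static void main(String[] args) {
            long compactedHFile = 18097;  // c84097b57f774f2e891601a38610f616, ~17.7 K
            long sizeToCheck = 16 * 1024; // 16.0 K, as logged
            System.out.println(shouldSplit(compactedHFile, sizeToCheck)); // true
        }
    }
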
2024-11-16T12:47:33,703 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d3649421ab101859b27c0f36943512c7: 2024-11-16T12:47:33,703 INFO [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7., storeName=d3649421ab101859b27c0f36943512c7/info, priority=13, startTime=1731761253647; duration=0sec 2024-11-16T12:47:33,703 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-16T12:47:33,703 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:47:33,703 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/c84097b57f774f2e891601a38610f616 because midkey is the same as first or last row 2024-11-16T12:47:33,703 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-16T12:47:33,703 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:47:33,703 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/c84097b57f774f2e891601a38610f616 because midkey is the same as first or last row 2024-11-16T12:47:33,704 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-16T12:47:33,704 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:47:33,704 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/c84097b57f774f2e891601a38610f616 because midkey is the same as first or last row 2024-11-16T12:47:33,704 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:47:33,704 DEBUG [RS:0;0450ab8807f5:33749-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d3649421ab101859b27c0f36943512c7:info 2024-11-16T12:47:33,996 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6a667fd4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46657, datanodeUuid=c07e50c5-97c3-4ced-b66a-aa7258388f1b, infoPort=34707, infoSecurePort=0, ipcPort=34013, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469):Failed to transfer 
BP-886867469-172.17.0.2-1731761227469:blk_1073741867_1050 to 127.0.0.1:37937 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:33,996 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2b42e84b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46657, datanodeUuid=c07e50c5-97c3-4ced-b66a-aa7258388f1b, infoPort=34707, infoSecurePort=0, ipcPort=34013, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469):Failed to transfer BP-886867469-172.17.0.2-1731761227469:blk_1073741872_1055 to 127.0.0.1:34023 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:34,250 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:34,251 WARN [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-16T12:47:34,438 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:47:34,441 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:47:34,442 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:47:34,442 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:47:34,442 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T12:47:34,442 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d04dc38{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:47:34,443 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e48919f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:47:34,536 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c23e5e9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/java.io.tmpdir/jetty-localhost-41961-hadoop-hdfs-3_4_1-tests_jar-_-any-3498057072490230212/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:34,536 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@62dc63d8{HTTP/1.1, (http/1.1)}{localhost:41961} 2024-11-16T12:47:34,536 INFO [Time-limited test {}] server.Server(415): Started @133423ms 2024-11-16T12:47:34,537 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:47:34,612 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:34,898 WARN [Thread-991 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T12:47:34,906 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9c3206a5bab9b4a0 with lease ID 0x92d8eda59640438d: from storage DS-8258fc71-cd75-42d7-8209-2e094c48164b node DatanodeRegistration(127.0.0.1:41597, datanodeUuid=fc9bb905-c3c0-4f61-a035-12b1008d0723, infoPort=40763, infoSecurePort=0, ipcPort=44473, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:34,906 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9c3206a5bab9b4a0 with lease ID 0x92d8eda59640438d: from storage DS-3c906e86-911b-4015-ae0f-eeb64ca7d21e node DatanodeRegistration(127.0.0.1:41597, datanodeUuid=fc9bb905-c3c0-4f61-a035-12b1008d0723, infoPort=40763, infoSecurePort=0, ipcPort=44473, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:35,358 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:35,999 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6a667fd4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46657, datanodeUuid=c07e50c5-97c3-4ced-b66a-aa7258388f1b, infoPort=34707, infoSecurePort=0, ipcPort=34013, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469):Failed to transfer BP-886867469-172.17.0.2-1731761227469:blk_1073741882_1065 to 127.0.0.1:39047 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
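
The DirectoryScanner warning above rejects any dfs.datanode.directoryscan.throttle.limit.ms.per.sec value over 1000 ms/sec and falls back to -1, as the message states. A hedged sketch of supplying a value the scanner will accept; the 500 here is purely illustrative:

    import org.apache.hadoop.conf.Configuration;

    public class DirectoryScanThrottle {
        public static Configuration build() {
            Configuration conf = new Configuration();
            // Values above 1000 ms/sec are rejected (see the warning above); anything in
            // (0, 1000] is accepted, so 500 is a valid, purely illustrative choice.
            conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 500);
            return conf;
        }
    }
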
2024-11-16T12:47:35,999 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2b42e84b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46657, datanodeUuid=c07e50c5-97c3-4ced-b66a-aa7258388f1b, infoPort=34707, infoSecurePort=0, ipcPort=34013, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469):Failed to transfer BP-886867469-172.17.0.2-1731761227469:blk_1073741857_1040 to 127.0.0.1:39047 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:36,251 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:36,612 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:37,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41597 is added to blk_1073741887_1070 (size=18097) 2024-11-16T12:47:37,359 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:38,251 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:38,613 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:39,065 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T12:47:39,359 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:39,604 ERROR [FSHLog-0-hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData-prefix:0450ab8807f5,35227,1731761229084 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. 
Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:39,605 WARN [FSHLog-0-hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData-prefix:0450ab8807f5,35227,1731761229084 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:39,605 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 0450ab8807f5%2C35227%2C1731761229084:(num 1731761229377) roll requested 2024-11-16T12:47:39,606 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C35227%2C1731761229084.1731761259605 2024-11-16T12:47:39,611 WARN [Thread-1012 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:39,611 WARN [Thread-1012 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK], DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]) is bad. 
2024-11-16T12:47:39,611 WARN [Thread-1012 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741888_1071 2024-11-16T12:47:39,613 WARN [Thread-1012 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK] 2024-11-16T12:47:39,620 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:39,620 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:39,621 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:39,621 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:39,621 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:39,621 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/WALs/0450ab8807f5,35227,1731761229084/0450ab8807f5%2C35227%2C1731761229084.1731761229377 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/WALs/0450ab8807f5,35227,1731761229084/0450ab8807f5%2C35227%2C1731761229084.1731761259605 2024-11-16T12:47:39,622 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:39,622 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:47:39,623 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/WALs/0450ab8807f5,35227,1731761229084/0450ab8807f5%2C35227%2C1731761229084.1731761229377 2024-11-16T12:47:39,623 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34707:34707),(127.0.0.1/127.0.0.1:40763:40763)] 2024-11-16T12:47:39,623 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/WALs/0450ab8807f5,35227,1731761229084/0450ab8807f5%2C35227%2C1731761229084.1731761229377 is not closed yet, will try archiving it next time 2024-11-16T12:47:39,623 WARN [IPC Server handler 3 on default port 39393 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/WALs/0450ab8807f5,35227,1731761229084/0450ab8807f5%2C35227%2C1731761229084.1731761229377 has not been closed. Lease recovery is in progress. RecoveryId = 1073 for block blk_1073741830_1006 2024-11-16T12:47:39,624 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/WALs/0450ab8807f5,35227,1731761229084/0450ab8807f5%2C35227%2C1731761229084.1731761229377 after 1ms 2024-11-16T12:47:40,252 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:40,613 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:42,253 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:42,614 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:43,625 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/WALs/0450ab8807f5,35227,1731761229084/0450ab8807f5%2C35227%2C1731761229084.1731761229377 after 4002ms 2024-11-16T12:47:44,253 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:44,614 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:47:44,902 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@8019efa[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41597, datanodeUuid=fc9bb905-c3c0-4f61-a035-12b1008d0723, infoPort=40763, infoSecurePort=0, ipcPort=44473, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469):Failed to transfer BP-886867469-172.17.0.2-1731761227469:blk_1073741836_1012 to 127.0.0.1:39047 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:44,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741832_1008 (size=32) 2024-11-16T12:47:44,922 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3972a23c {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-886867469-172.17.0.2-1731761227469:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:34023,null,null]) java.net.ConnectException: Call From 0450ab8807f5/172.17.0.2 to localhost:40715 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-16T12:47:44,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41597 is added to blk_1073741833_1019 (size=455) 2024-11-16T12:47:45,211 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761229866 to hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/oldWALs/0450ab8807f5%2C33749%2C1731761229240.1731761229866 2024-11-16T12:47:45,212 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761250215 to hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/oldWALs/0450ab8807f5%2C33749%2C1731761229240.1731761250215 2024-11-16T12:47:45,902 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@759272bf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41597, datanodeUuid=fc9bb905-c3c0-4f61-a035-12b1008d0723, infoPort=40763, infoSecurePort=0, ipcPort=44473, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469):Failed to transfer BP-886867469-172.17.0.2-1731761227469:blk_1073741828_1004 to 127.0.0.1:39047 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:45,902 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@8019efa[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41597, datanodeUuid=fc9bb905-c3c0-4f61-a035-12b1008d0723, infoPort=40763, infoSecurePort=0, ipcPort=44473, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469):Failed to transfer BP-886867469-172.17.0.2-1731761227469:blk_1073741826_1002 to 127.0.0.1:39047 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:46,254 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:46,615 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:47,802 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33749%2C1731761229240.1731761267802 2024-11-16T12:47:47,807 WARN [Thread-1024 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1074 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39047 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:47,807 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_221165497_22 at /127.0.0.1:50152 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741890_1074] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data4]'}, localName='127.0.0.1:41597', datanodeUuid='fc9bb905-c3c0-4f61-a035-12b1008d0723', xmitsInProgress=0}:Exception transferring block BP-886867469-172.17.0.2-1731761227469:blk_1073741890_1074 to mirror 127.0.0.1:39047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:47,807 WARN [Thread-1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741890_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41597,DS-8258fc71-cd75-42d7-8209-2e094c48164b,DISK], DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK]) is bad. 
2024-11-16T12:47:47,808 WARN [Thread-1024 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741890_1074 2024-11-16T12:47:47,808 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_221165497_22 at /127.0.0.1:50152 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741890_1074] {}] datanode.BlockReceiver(316): Block 1073741890 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T12:47:47,808 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_221165497_22 at /127.0.0.1:50152 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741890_1074] {}] datanode.DataXceiver(331): 127.0.0.1:41597:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50152 dst: /127.0.0.1:41597 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:47,809 WARN [Thread-1024 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39047,DS-bf04fae8-0ed5-4484-9b52-e6e454497ab0,DISK] 2024-11-16T12:47:47,811 WARN [Thread-1024 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37937 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:47:47,811 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_221165497_22 at /127.0.0.1:45328 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741891_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data8]'}, localName='127.0.0.1:46657', datanodeUuid='c07e50c5-97c3-4ced-b66a-aa7258388f1b', xmitsInProgress=0}:Exception transferring block BP-886867469-172.17.0.2-1731761227469:blk_1073741891_1075 to mirror 127.0.0.1:37937 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:47,812 WARN [Thread-1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-886867469-172.17.0.2-1731761227469:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46657,DS-237c4a1c-6bb3-4081-85ce-248206a8d4b3,DISK], DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK]) is bad. 2024-11-16T12:47:47,812 WARN [Thread-1024 {}] hdfs.DataStreamer(1850): Abandoning BP-886867469-172.17.0.2-1731761227469:blk_1073741891_1075 2024-11-16T12:47:47,812 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_221165497_22 at /127.0.0.1:45328 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741891_1075] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T12:47:47,812 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_221165497_22 at /127.0.0.1:45328 [Receiving block BP-886867469-172.17.0.2-1731761227469:blk_1073741891_1075] {}] datanode.DataXceiver(331): 127.0.0.1:46657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45328 dst: /127.0.0.1:46657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:47,812 WARN [Thread-1024 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37937,DS-2055084b-7869-4d0c-b186-25cbf66349fb,DISK] 2024-11-16T12:47:47,817 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:47,817 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:47,817 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:47,817 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:47,817 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:47,818 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761252231 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761267802 2024-11-16T12:47:47,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741877_1060 (size=12911) 2024-11-16T12:47:47,836 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34707:34707),(127.0.0.1/127.0.0.1:40763:40763)] 2024-11-16T12:47:47,836 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761252231 is not closed yet, will try archiving it next time 2024-11-16T12:47:47,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33749 {}] regionserver.HRegion(8855): Flush requested on d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:47,842 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d3649421ab101859b27c0f36943512c7 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T12:47:47,847 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/6fce4de76aa7430ba6841d2fc19ffaeb is 1080, key is row0013/info:/1731761267838/Put/seqid=0 2024-11-16T12:47:47,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741893_1077 (size=8190) 2024-11-16T12:47:47,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41597 is added to blk_1073741893_1077 (size=8190) 2024-11-16T12:47:47,854 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), 
to=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/6fce4de76aa7430ba6841d2fc19ffaeb 2024-11-16T12:47:47,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/6fce4de76aa7430ba6841d2fc19ffaeb as hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/6fce4de76aa7430ba6841d2fc19ffaeb 2024-11-16T12:47:47,872 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/6fce4de76aa7430ba6841d2fc19ffaeb, entries=3, sequenceid=66, filesize=8.0 K 2024-11-16T12:47:47,873 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for d3649421ab101859b27c0f36943512c7 in 32ms, sequenceid=66, compaction requested=false 2024-11-16T12:47:47,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d3649421ab101859b27c0f36943512c7: 2024-11-16T12:47:47,874 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-16T12:47:47,874 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:47:47,874 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/c84097b57f774f2e891601a38610f616 because midkey is the same as first or last row 2024-11-16T12:47:47,902 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@8019efa[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41597, datanodeUuid=fc9bb905-c3c0-4f61-a035-12b1008d0723, infoPort=40763, infoSecurePort=0, ipcPort=44473, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469):Failed to transfer BP-886867469-172.17.0.2-1731761227469:blk_1073741825_1001 to 127.0.0.1:37937 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T12:47:47,902 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@759272bf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41597, datanodeUuid=fc9bb905-c3c0-4f61-a035-12b1008d0723, infoPort=40763, infoSecurePort=0, ipcPort=44473, storageInfo=lv=-57;cid=testClusterID;nsid=2033418441;c=1731761227469):Failed to transfer BP-886867469-172.17.0.2-1731761227469:blk_1073741833_1019 to 127.0.0.1:37937 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:48,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33749 {}] regionserver.HRegion(8855): Flush requested on d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:48,070 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d3649421ab101859b27c0f36943512c7 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-16T12:47:48,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/d92573b75f4c457085c2352f03c9452e is 1080, key is row0015/info:/1731761267843/Put/seqid=0 2024-11-16T12:47:48,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741894_1078 (size=14660) 2024-11-16T12:47:48,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41597 is added to blk_1073741894_1078 (size=14660) 2024-11-16T12:47:48,220 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.1731761252231 to hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/oldWALs/0450ab8807f5%2C33749%2C1731761229240.1731761252231 2024-11-16T12:47:48,254 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:48,254 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-16T12:47:48,271 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T12:47:48,271 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T12:47:48,272 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:47:48,272 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:48,272 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:48,272 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T12:47:48,273 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T12:47:48,273 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=733918336, stopped=false 2024-11-16T12:47:48,273 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0450ab8807f5,35227,1731761229084 2024-11-16T12:47:48,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35663-0x10144f8dfd10002, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T12:47:48,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T12:47:48,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35663-0x10144f8dfd10002, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:48,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:48,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T12:47:48,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:48,318 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T12:47:48,318 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T12:47:48,318 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:47:48,318 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:47:48,319 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:48,319 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0450ab8807f5,33749,1731761229240' ***** 2024-11-16T12:47:48,319 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T12:47:48,319 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0450ab8807f5,35663,1731761230502' ***** 2024-11-16T12:47:48,319 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35663-0x10144f8dfd10002, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:47:48,319 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T12:47:48,319 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T12:47:48,319 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T12:47:48,319 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T12:47:48,319 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:47:48,319 INFO [RS:1;0450ab8807f5:35663 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T12:47:48,319 INFO [RS:1;0450ab8807f5:35663 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T12:47:48,320 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.HRegionServer(959): stopping server 0450ab8807f5,35663,1731761230502 2024-11-16T12:47:48,320 INFO [RS:1;0450ab8807f5:35663 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T12:47:48,320 INFO [RS:1;0450ab8807f5:35663 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;0450ab8807f5:35663. 
2024-11-16T12:47:48,320 DEBUG [RS:1;0450ab8807f5:35663 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:47:48,320 DEBUG [RS:1;0450ab8807f5:35663 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:48,320 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.HRegionServer(976): stopping server 0450ab8807f5,35663,1731761230502; all regions closed. 2024-11-16T12:47:48,320 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:48,320 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:48,321 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:48,321 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:48,321 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:48,321 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:48,321 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:48,321 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 2024-11-16T12:47:48,322 WARN [IPC Server handler 3 on default port 39393 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 has not been closed. Lease recovery is in progress. RecoveryId = 1079 for block blk_1073741837_1013 2024-11-16T12:47:48,322 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 after 1ms 2024-11-16T12:47:48,483 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/d92573b75f4c457085c2352f03c9452e 2024-11-16T12:47:48,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/.tmp/info/d92573b75f4c457085c2352f03c9452e as hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/d92573b75f4c457085c2352f03c9452e 2024-11-16T12:47:48,502 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/d92573b75f4c457085c2352f03c9452e, entries=9, sequenceid=79, filesize=14.3 K 2024-11-16T12:47:48,503 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10758, heapSize ~11.48 KB/11760, currentSize=0 B/0 for d3649421ab101859b27c0f36943512c7 in 433ms, sequenceid=79, compaction requested=true 2024-11-16T12:47:48,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d3649421ab101859b27c0f36943512c7: 2024-11-16T12:47:48,503 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.0 K, sizeToCheck=16.0 K 2024-11-16T12:47:48,503 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:47:48,503 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/c84097b57f774f2e891601a38610f616 because midkey is the same as first or last row 2024-11-16T12:47:48,503 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T12:47:48,503 INFO [RS:0;0450ab8807f5:33749 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T12:47:48,503 INFO [RS:0;0450ab8807f5:33749 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T12:47:48,503 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(3091): Received CLOSE for d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:48,504 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(959): stopping server 0450ab8807f5,33749,1731761229240 2024-11-16T12:47:48,504 INFO [RS:0;0450ab8807f5:33749 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T12:47:48,504 INFO [RS:0;0450ab8807f5:33749 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0450ab8807f5:33749. 2024-11-16T12:47:48,504 DEBUG [RS:0;0450ab8807f5:33749 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:47:48,504 DEBUG [RS:0;0450ab8807f5:33749 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:48,504 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d3649421ab101859b27c0f36943512c7, disabling compactions & flushes 2024-11-16T12:47:48,504 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7. 2024-11-16T12:47:48,504 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-16T12:47:48,504 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7. 2024-11-16T12:47:48,504 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T12:47:48,504 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T12:47:48,504 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7. after waiting 0 ms 2024-11-16T12:47:48,504 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T12:47:48,504 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7. 2024-11-16T12:47:48,504 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T12:47:48,504 DEBUG [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, d3649421ab101859b27c0f36943512c7=TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7.} 2024-11-16T12:47:48,505 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T12:47:48,505 DEBUG [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d3649421ab101859b27c0f36943512c7 2024-11-16T12:47:48,505 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T12:47:48,505 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T12:47:48,505 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/d62a6b3f21c542f7be6c35e45cb60f73, hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/57155b9bd4324e5393d4f5a5eeec47af, hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/deafd3bbe23e4ca482fe11e6d4dedd37, hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/8116c10d294f4b49a230a05e927820b3, hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/aa7a90d88924455c8249257aa43a8515, 
hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/a56d48fb175343a9a9a3aab3df98e30a] to archive 2024-11-16T12:47:48,505 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T12:47:48,505 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T12:47:48,505 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-16T12:47:48,505 ERROR [FSHLog-0-hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628-prefix:0450ab8807f5,33749,1731761229240.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:48,506 WARN [FSHLog-0-hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628-prefix:0450ab8807f5,33749,1731761229240.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:48,506 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0450ab8807f5%2C33749%2C1731761229240.meta:.meta(num 1731761230290) roll requested 2024-11-16T12:47:48,506 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-16T12:47:48,506 INFO [regionserver/0450ab8807f5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33749%2C1731761229240.meta.1731761268506.meta 2024-11-16T12:47:48,508 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/d62a6b3f21c542f7be6c35e45cb60f73 to hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/d62a6b3f21c542f7be6c35e45cb60f73 2024-11-16T12:47:48,510 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/57155b9bd4324e5393d4f5a5eeec47af to hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/57155b9bd4324e5393d4f5a5eeec47af 2024-11-16T12:47:48,512 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/deafd3bbe23e4ca482fe11e6d4dedd37 to hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/deafd3bbe23e4ca482fe11e6d4dedd37 2024-11-16T12:47:48,514 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/8116c10d294f4b49a230a05e927820b3 to hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/8116c10d294f4b49a230a05e927820b3 2024-11-16T12:47:48,515 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/aa7a90d88924455c8249257aa43a8515 to hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/aa7a90d88924455c8249257aa43a8515 2024-11-16T12:47:48,516 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:48,517 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:48,517 INFO [sync.2 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:48,517 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/a56d48fb175343a9a9a3aab3df98e30a to hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/info/a56d48fb175343a9a9a3aab3df98e30a 2024-11-16T12:47:48,517 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:48,517 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:48,517 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761268506.meta 2024-11-16T12:47:48,517 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=0450ab8807f5:35227 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-16T12:47:48,518 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [d62a6b3f21c542f7be6c35e45cb60f73=10347, 57155b9bd4324e5393d4f5a5eeec47af=12506, deafd3bbe23e4ca482fe11e6d4dedd37=17994, 8116c10d294f4b49a230a05e927820b3=6027, aa7a90d88924455c8249257aa43a8515=6027, a56d48fb175343a9a9a3aab3df98e30a=6027] 2024-11-16T12:47:48,518 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:48,518 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34023,DS-7068dce2-5d81-4029-bc2f-296d6fc5c0df,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:47:48,518 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta 2024-11-16T12:47:48,519 WARN [IPC Server handler 2 on default port 39393 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1081 for block blk_1073741834_1010 2024-11-16T12:47:48,519 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta after 1ms 2024-11-16T12:47:48,531 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34707:34707),(127.0.0.1/127.0.0.1:40763:40763)] 2024-11-16T12:47:48,531 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta is not closed yet, will try archiving it next time 2024-11-16T12:47:48,531 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d3649421ab101859b27c0f36943512c7/recovered.edits/82.seqid, newMaxSeqId=82, maxSeqId=1 2024-11-16T12:47:48,532 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7. 2024-11-16T12:47:48,532 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d3649421ab101859b27c0f36943512c7: Waiting for close lock at 1731761268504Running coprocessor pre-close hooks at 1731761268504Disabling compacts and flushes for region at 1731761268504Disabling writes for close at 1731761268504Writing region close event to WAL at 1731761268523 (+19 ms)Running coprocessor post-close hooks at 1731761268532 (+9 ms)Closed at 1731761268532 2024-11-16T12:47:48,533 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7. 
2024-11-16T12:47:48,547 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740/.tmp/info/44ed34b882c64d46b6d3c0aea3f584ab is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731761230641.d3649421ab101859b27c0f36943512c7./info:regioninfo/1731761231017/Put/seqid=0 2024-11-16T12:47:48,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741896_1082 (size=7089) 2024-11-16T12:47:48,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41597 is added to blk_1073741896_1082 (size=7089) 2024-11-16T12:47:48,611 INFO [regionserver/0450ab8807f5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T12:47:48,612 INFO [regionserver/0450ab8807f5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T12:47:48,612 INFO [regionserver/0450ab8807f5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T12:47:48,705 DEBUG [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T12:47:48,774 INFO [regionserver/0450ab8807f5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T12:47:48,774 INFO [regionserver/0450ab8807f5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T12:47:48,905 DEBUG [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T12:47:48,954 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740/.tmp/info/44ed34b882c64d46b6d3c0aea3f584ab 2024-11-16T12:47:48,984 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740/.tmp/ns/127aa2c596454a9094bdc31f9440ac6d is 43, key is default/ns:d/1731761230379/Put/seqid=0 2024-11-16T12:47:48,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741897_1083 (size=5153) 2024-11-16T12:47:48,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41597 is added to blk_1073741897_1083 (size=5153) 2024-11-16T12:47:48,994 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740/.tmp/ns/127aa2c596454a9094bdc31f9440ac6d 2024-11-16T12:47:49,025 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740/.tmp/table/b55a3f45d361471e84227b8a0bf64fda is 77, key is 
TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731761231029/Put/seqid=0 2024-11-16T12:47:49,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741898_1084 (size=5424) 2024-11-16T12:47:49,045 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740/.tmp/table/b55a3f45d361471e84227b8a0bf64fda 2024-11-16T12:47:49,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41597 is added to blk_1073741898_1084 (size=5424) 2024-11-16T12:47:49,055 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740/.tmp/info/44ed34b882c64d46b6d3c0aea3f584ab as hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740/info/44ed34b882c64d46b6d3c0aea3f584ab 2024-11-16T12:47:49,063 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740/info/44ed34b882c64d46b6d3c0aea3f584ab, entries=10, sequenceid=11, filesize=6.9 K 2024-11-16T12:47:49,065 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740/.tmp/ns/127aa2c596454a9094bdc31f9440ac6d as hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740/ns/127aa2c596454a9094bdc31f9440ac6d 2024-11-16T12:47:49,073 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740/ns/127aa2c596454a9094bdc31f9440ac6d, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T12:47:49,074 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740/.tmp/table/b55a3f45d361471e84227b8a0bf64fda as hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740/table/b55a3f45d361471e84227b8a0bf64fda 2024-11-16T12:47:49,084 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740/table/b55a3f45d361471e84227b8a0bf64fda, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T12:47:49,085 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 580ms, sequenceid=11, compaction requested=false 2024-11-16T12:47:49,100 DEBUG 
[RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T12:47:49,101 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T12:47:49,101 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T12:47:49,101 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731761268505Running coprocessor pre-close hooks at 1731761268505Disabling compacts and flushes for region at 1731761268505Disabling writes for close at 1731761268505Obtaining lock to block concurrent updates at 1731761268505Preparing flush snapshotting stores in 1588230740 at 1731761268505Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731761268506 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731761268532 (+26 ms)Flushing 1588230740/info: creating writer at 1731761268532Flushing 1588230740/info: appending metadata at 1731761268547 (+15 ms)Flushing 1588230740/info: closing flushed file at 1731761268547Flushing 1588230740/ns: creating writer at 1731761268962 (+415 ms)Flushing 1588230740/ns: appending metadata at 1731761268984 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1731761268984Flushing 1588230740/table: creating writer at 1731761269003 (+19 ms)Flushing 1588230740/table: appending metadata at 1731761269025 (+22 ms)Flushing 1588230740/table: closing flushed file at 1731761269025Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f60ebb1: reopening flushed file at 1731761269053 (+28 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@231c3e2b: reopening flushed file at 1731761269064 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6658299: reopening flushed file at 1731761269073 (+9 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 580ms, sequenceid=11, compaction requested=false at 1731761269085 (+12 ms)Writing region close event to WAL at 1731761269095 (+10 ms)Running coprocessor post-close hooks at 1731761269101 (+6 ms)Closed at 1731761269101 2024-11-16T12:47:49,101 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T12:47:49,105 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(976): stopping server 0450ab8807f5,33749,1731761229240; all regions closed. 
2024-11-16T12:47:49,106 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:49,106 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:49,106 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:49,106 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:49,106 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:49,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41597 is added to blk_1073741895_1080 (size=825) 2024-11-16T12:47:49,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741895_1080 (size=825) 2024-11-16T12:47:49,729 INFO [regionserver/0450ab8807f5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T12:47:50,402 INFO [master/0450ab8807f5:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T12:47:50,402 INFO [master/0450ab8807f5:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-16T12:47:50,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741835_1011 (size=393) 2024-11-16T12:47:50,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741831_1007 (size=1321) 2024-11-16T12:47:51,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741827_1003 (size=196) 2024-11-16T12:47:51,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741829_1005 (size=34) 2024-11-16T12:47:52,323 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 after 4002ms 2024-11-16T12:47:52,520 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta after 4002ms 2024-11-16T12:47:53,321 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-16T12:47:53,324 DEBUG [RS:1;0450ab8807f5:35663 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/oldWALs 2024-11-16T12:47:53,324 INFO [RS:1;0450ab8807f5:35663 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0450ab8807f5%2C35663%2C1731761230502:(num 1731761230742) 2024-11-16T12:47:53,325 DEBUG [RS:1;0450ab8807f5:35663 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:53,325 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T12:47:53,325 INFO [RS:1;0450ab8807f5:35663 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T12:47:53,325 INFO [RS:1;0450ab8807f5:35663 {}] hbase.ChoreService(370): 
Chore service for: regionserver/0450ab8807f5:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T12:47:53,325 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T12:47:53,325 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T12:47:53,325 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T12:47:53,325 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T12:47:53,325 INFO [RS:1;0450ab8807f5:35663 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T12:47:53,325 INFO [RS:1;0450ab8807f5:35663 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35663 2024-11-16T12:47:53,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T12:47:53,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35663-0x10144f8dfd10002, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0450ab8807f5,35663,1731761230502 2024-11-16T12:47:53,367 INFO [RS:1;0450ab8807f5:35663 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T12:47:53,375 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0450ab8807f5,35663,1731761230502] 2024-11-16T12:47:53,383 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0450ab8807f5,35663,1731761230502 already deleted, retry=false 2024-11-16T12:47:53,383 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0450ab8807f5,35663,1731761230502 expired; onlineServers=1 2024-11-16T12:47:53,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35663-0x10144f8dfd10002, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:47:53,476 INFO [RS:1;0450ab8807f5:35663 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T12:47:53,476 INFO [RS:1;0450ab8807f5:35663 {}] regionserver.HRegionServer(1031): Exiting; stopping=0450ab8807f5,35663,1731761230502; zookeeper connection closed. 2024-11-16T12:47:53,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35663-0x10144f8dfd10002, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:47:53,476 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3ca621d4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3ca621d4 2024-11-16T12:47:53,534 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:53,555 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:53,555 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:53,555 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:53,555 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:53,556 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:53,564 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:53,565 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:54,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41597 is added to blk_1073741877_1060 (size=12911) 2024-11-16T12:47:54,067 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T12:47:54,092 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:54,093 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:54,093 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:54,093 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:54,094 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:54,094 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:54,100 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:54,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:47:54,107 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-16T12:47:54,109 DEBUG [RS:0;0450ab8807f5:33749 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/oldWALs 2024-11-16T12:47:54,109 INFO [RS:0;0450ab8807f5:33749 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0450ab8807f5%2C33749%2C1731761229240.meta:.meta(num 1731761268506) 2024-11-16T12:47:54,110 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:54,110 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:54,110 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:54,110 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:54,110 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:54,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41597 is added to blk_1073741892_1076 (size=15850) 2024-11-16T12:47:54,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741892_1076 (size=15850) 2024-11-16T12:47:54,114 DEBUG [RS:0;0450ab8807f5:33749 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/oldWALs 2024-11-16T12:47:54,114 INFO [RS:0;0450ab8807f5:33749 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0450ab8807f5%2C33749%2C1731761229240:(num 1731761267802) 2024-11-16T12:47:54,114 DEBUG [RS:0;0450ab8807f5:33749 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:54,114 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T12:47:54,115 INFO [RS:0;0450ab8807f5:33749 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T12:47:54,115 INFO [RS:0;0450ab8807f5:33749 {}] hbase.ChoreService(370): Chore service for: regionserver/0450ab8807f5:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T12:47:54,115 INFO [RS:0;0450ab8807f5:33749 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T12:47:54,115 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T12:47:54,115 INFO [RS:0;0450ab8807f5:33749 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33749 2024-11-16T12:47:54,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0450ab8807f5,33749,1731761229240 2024-11-16T12:47:54,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T12:47:54,175 INFO [RS:0;0450ab8807f5:33749 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T12:47:54,183 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0450ab8807f5,33749,1731761229240] 2024-11-16T12:47:54,192 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0450ab8807f5,33749,1731761229240 already deleted, retry=false 2024-11-16T12:47:54,192 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0450ab8807f5,33749,1731761229240 expired; onlineServers=0 2024-11-16T12:47:54,192 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0450ab8807f5,35227,1731761229084' ***** 2024-11-16T12:47:54,192 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T12:47:54,192 INFO [M:0;0450ab8807f5:35227 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T12:47:54,192 INFO [M:0;0450ab8807f5:35227 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T12:47:54,192 DEBUG [M:0;0450ab8807f5:35227 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T12:47:54,192 DEBUG [M:0;0450ab8807f5:35227 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T12:47:54,192 DEBUG [master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761229611 {}] cleaner.HFileCleaner(306): Exit Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761229611,5,FailOnTimeoutGroup] 2024-11-16T12:47:54,192 DEBUG [master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761229611 {}] cleaner.HFileCleaner(306): Exit Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761229611,5,FailOnTimeoutGroup] 2024-11-16T12:47:54,192 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T12:47:54,192 INFO [M:0;0450ab8807f5:35227 {}] hbase.ChoreService(370): Chore service for: master/0450ab8807f5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T12:47:54,192 INFO [M:0;0450ab8807f5:35227 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T12:47:54,193 DEBUG [M:0;0450ab8807f5:35227 {}] master.HMaster(1795): Stopping service threads 2024-11-16T12:47:54,193 INFO [M:0;0450ab8807f5:35227 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T12:47:54,193 INFO [M:0;0450ab8807f5:35227 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T12:47:54,193 INFO [M:0;0450ab8807f5:35227 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T12:47:54,193 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T12:47:54,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T12:47:54,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:54,200 DEBUG [M:0;0450ab8807f5:35227 {}] zookeeper.ZKUtil(347): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T12:47:54,200 WARN [M:0;0450ab8807f5:35227 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T12:47:54,201 INFO [M:0;0450ab8807f5:35227 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/.lastflushedseqids 2024-11-16T12:47:54,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741899_1085 (size=130) 2024-11-16T12:47:54,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41597 is added to blk_1073741899_1085 (size=130) 2024-11-16T12:47:54,207 INFO [M:0;0450ab8807f5:35227 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T12:47:54,207 INFO [M:0;0450ab8807f5:35227 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T12:47:54,207 DEBUG [M:0;0450ab8807f5:35227 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T12:47:54,207 INFO [M:0;0450ab8807f5:35227 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:54,207 DEBUG [M:0;0450ab8807f5:35227 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:54,207 DEBUG [M:0;0450ab8807f5:35227 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-16T12:47:54,207 DEBUG [M:0;0450ab8807f5:35227 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:54,208 INFO [M:0;0450ab8807f5:35227 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-16T12:47:54,226 DEBUG [M:0;0450ab8807f5:35227 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3210d93d12574680902e48bb40fd5628 is 82, key is hbase:meta,,1/info:regioninfo/1731761230320/Put/seqid=0 2024-11-16T12:47:54,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741900_1086 (size=5672) 2024-11-16T12:47:54,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41597 is added to blk_1073741900_1086 (size=5672) 2024-11-16T12:47:54,231 INFO [M:0;0450ab8807f5:35227 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3210d93d12574680902e48bb40fd5628 2024-11-16T12:47:54,255 DEBUG [M:0;0450ab8807f5:35227 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0013dc018d0c4d52b812bced308ff72e is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731761231034/Put/seqid=0 2024-11-16T12:47:54,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41597 is added to blk_1073741901_1087 (size=6256) 2024-11-16T12:47:54,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741901_1087 (size=6256) 2024-11-16T12:47:54,260 INFO [M:0;0450ab8807f5:35227 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0013dc018d0c4d52b812bced308ff72e 2024-11-16T12:47:54,265 INFO [M:0;0450ab8807f5:35227 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0013dc018d0c4d52b812bced308ff72e 2024-11-16T12:47:54,280 DEBUG [M:0;0450ab8807f5:35227 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6502ab5485fc456b86a61cb3fb23b55a is 69, key is 0450ab8807f5,33749,1731761229240/rs:state/1731761229709/Put/seqid=0 2024-11-16T12:47:54,284 INFO [RS:0;0450ab8807f5:33749 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T12:47:54,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:47:54,284 INFO [RS:0;0450ab8807f5:33749 {}] regionserver.HRegionServer(1031): Exiting; 
stopping=0450ab8807f5,33749,1731761229240; zookeeper connection closed. 2024-11-16T12:47:54,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33749-0x10144f8dfd10001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:47:54,285 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@14c4ee89 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@14c4ee89 2024-11-16T12:47:54,285 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-16T12:47:54,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41597 is added to blk_1073741902_1088 (size=5224) 2024-11-16T12:47:54,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741902_1088 (size=5224) 2024-11-16T12:47:54,288 INFO [M:0;0450ab8807f5:35227 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6502ab5485fc456b86a61cb3fb23b55a 2024-11-16T12:47:54,310 DEBUG [M:0;0450ab8807f5:35227 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/00ec21384ac548c6b85b80921b18b2be is 52, key is load_balancer_on/state:d/1731761230483/Put/seqid=0 2024-11-16T12:47:54,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741903_1089 (size=5056) 2024-11-16T12:47:54,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41597 is added to blk_1073741903_1089 (size=5056) 2024-11-16T12:47:54,316 INFO [M:0;0450ab8807f5:35227 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/00ec21384ac548c6b85b80921b18b2be 2024-11-16T12:47:54,322 DEBUG [M:0;0450ab8807f5:35227 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3210d93d12574680902e48bb40fd5628 as hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3210d93d12574680902e48bb40fd5628 2024-11-16T12:47:54,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:47:54,328 INFO [M:0;0450ab8807f5:35227 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3210d93d12574680902e48bb40fd5628, entries=8, sequenceid=60, filesize=5.5 K 2024-11-16T12:47:54,329 DEBUG [M:0;0450ab8807f5:35227 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0013dc018d0c4d52b812bced308ff72e as hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0013dc018d0c4d52b812bced308ff72e 2024-11-16T12:47:54,334 INFO [M:0;0450ab8807f5:35227 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0013dc018d0c4d52b812bced308ff72e 2024-11-16T12:47:54,334 INFO [M:0;0450ab8807f5:35227 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0013dc018d0c4d52b812bced308ff72e, entries=6, sequenceid=60, filesize=6.1 K 2024-11-16T12:47:54,336 DEBUG [M:0;0450ab8807f5:35227 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6502ab5485fc456b86a61cb3fb23b55a as hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6502ab5485fc456b86a61cb3fb23b55a 2024-11-16T12:47:54,342 INFO 
[M:0;0450ab8807f5:35227 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6502ab5485fc456b86a61cb3fb23b55a, entries=2, sequenceid=60, filesize=5.1 K 2024-11-16T12:47:54,343 DEBUG [M:0;0450ab8807f5:35227 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/00ec21384ac548c6b85b80921b18b2be as hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/00ec21384ac548c6b85b80921b18b2be 2024-11-16T12:47:54,349 INFO [M:0;0450ab8807f5:35227 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/00ec21384ac548c6b85b80921b18b2be, entries=1, sequenceid=60, filesize=4.9 K 2024-11-16T12:47:54,350 INFO [M:0;0450ab8807f5:35227 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=60, compaction requested=false 2024-11-16T12:47:54,352 INFO [M:0;0450ab8807f5:35227 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:54,352 DEBUG [M:0;0450ab8807f5:35227 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731761274207Disabling compacts and flushes for region at 1731761274207Disabling writes for close at 1731761274207Obtaining lock to block concurrent updates at 1731761274208 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731761274208Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1731761274208Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731761274209 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731761274209Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731761274225 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731761274225Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731761274237 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731761274254 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731761274254Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731761274265 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731761274280 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731761274280Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731761274293 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731761274310 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731761274310Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@657ce787: reopening flushed file at 1731761274321 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f47c251: reopening flushed file at 1731761274328 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37c10dc3: reopening flushed file at 1731761274335 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2184bd35: reopening flushed file at 1731761274342 (+7 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=60, compaction requested=false at 1731761274350 (+8 ms)Writing region close event to WAL at 1731761274352 (+2 ms)Closed at 1731761274352 2024-11-16T12:47:54,352 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:54,353 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:54,353 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:54,353 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:54,353 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:47:54,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41597 is added to blk_1073741889_1072 (size=1045) 2024-11-16T12:47:54,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741889_1072 (size=1045) 2024-11-16T12:47:54,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:47:54,926 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@d0b10cc {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-886867469-172.17.0.2-1731761227469:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:34023,null,null]) java.net.ConnectException: Call From 0450ab8807f5/172.17.0.2 to localhost:40715 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-16T12:47:55,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:47:55,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:47:55,644 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/WALs/0450ab8807f5,35227,1731761229084/0450ab8807f5%2C35227%2C1731761229084.1731761229377 to hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/oldWALs/0450ab8807f5%2C35227%2C1731761229084.1731761229377 2024-11-16T12:47:55,648 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/MasterData/oldWALs/0450ab8807f5%2C35227%2C1731761229084.1731761229377 to hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/oldWALs/0450ab8807f5%2C35227%2C1731761229084.1731761229377$masterlocalwal$ 2024-11-16T12:47:55,649 INFO [M:0;0450ab8807f5:35227 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T12:47:55,649 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T12:47:55,649 INFO [M:0;0450ab8807f5:35227 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35227 2024-11-16T12:47:55,649 INFO [M:0;0450ab8807f5:35227 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T12:47:55,781 INFO [M:0;0450ab8807f5:35227 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T12:47:55,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:47:55,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35227-0x10144f8dfd10000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:47:55,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c23e5e9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:55,785 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@62dc63d8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:47:55,785 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:47:55,786 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e48919f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:47:55,786 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d04dc38{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.log.dir/,STOPPED} 2024-11-16T12:47:55,788 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T12:47:55,788 WARN [BP-886867469-172.17.0.2-1731761227469 heartbeating to localhost/127.0.0.1:39393 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:47:55,788 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:47:55,788 WARN [BP-886867469-172.17.0.2-1731761227469 heartbeating to localhost/127.0.0.1:39393 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-886867469-172.17.0.2-1731761227469 (Datanode Uuid fc9bb905-c3c0-4f61-a035-12b1008d0723) service to localhost/127.0.0.1:39393 2024-11-16T12:47:55,787 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@595d5d73 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-886867469-172.17.0.2-1731761227469:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:34023,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:40715 , LocalHost:localPort 0450ab8807f5/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-16T12:47:55,788 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@595d5d73 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-886867469-172.17.0.2-1731761227469:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:41597,null,null]) java.io.IOException: No block pool offer service for bpid=BP-886867469-172.17.0.2-1731761227469 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
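Editor's note: the `Caused by: java.io.InterruptedIOException ... sleep interrupted` chain above shows the Hadoop IPC client abandoning its retry loop once its thread is interrupted during the back-off sleep; the `CachingGetSpaceUsed$RefreshThread` warnings nearby are the same pattern in the datanode's disk-usage refresher. Below is a generic, hedged sketch of interrupt-aware retrying (arbitrary retry count, sleep, and class name; not the Hadoop `Client` implementation):

```java
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.Callable;

// Generic sketch of an interrupt-aware retry loop like the one interrupted
// in the trace above; the retry count and sleep are example values only.
final class RetrySketch {
  static <T> T callWithRetries(Callable<T> op, int maxRetries, long sleepMs)
      throws Exception {
    for (int attempt = 0; ; attempt++) {
      try {
        return op.call();
      } catch (IOException e) {
        if (attempt >= maxRetries) {
          throw e;                          // retries exhausted
        }
        try {
          Thread.sleep(sleepMs);            // back off before the next attempt
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt(); // preserve the interrupt status
          InterruptedIOException iioe =
              new InterruptedIOException("Interrupted while retrying");
          iioe.initCause(ie);
          throw iioe;                       // stop retrying, as the log shows
        }
      }
    }
  }
}
```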
2024-11-16T12:47:55,789 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@595d5d73 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-886867469-172.17.0.2-1731761227469:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:34023,null,null], DatanodeInfoWithStorage[127.0.0.1:41597,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-886867469-172.17.0.2-1731761227469:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:34023,null,null], DatanodeInfoWithStorage[127.0.0.1:41597,null,null]] 2024-11-16T12:47:55,789 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data3/current/BP-886867469-172.17.0.2-1731761227469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:55,789 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@595d5d73 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-886867469-172.17.0.2-1731761227469:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:41597,null,null]) java.io.IOException: No block pool offer service for bpid=BP-886867469-172.17.0.2-1731761227469 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:47:55,789 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@595d5d73 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-886867469-172.17.0.2-1731761227469:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:34023,null,null]) java.io.IOException: No block pool offer service for bpid=BP-886867469-172.17.0.2-1731761227469 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
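Editor's note: the flush entries a little earlier in this section (`Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families ...` followed by `Committing .../.tmp/<file> as .../<family>/<file>`) follow the write-to-temporary-file-then-rename commit pattern. The sketch below shows that pattern only in outline under stated assumptions: the `writeAndCommit` helper, the paths, and the payload are hypothetical, while `FileSystem.create` and `FileSystem.rename` are real Hadoop filesystem APIs.

```java
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal sketch of the write-to-.tmp-then-rename commit pattern seen in the
// "Committing .../.tmp/... as ..." entries above; not the HBase store layout.
final class TmpCommitSketch {
  static Path writeAndCommit(FileSystem fs, Path storeDir, String fileName,
      byte[] payload) throws IOException {
    Path tmp = new Path(storeDir, ".tmp/" + fileName);
    Path dst = new Path(storeDir, fileName);
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(payload);                 // stage the data under .tmp first
    }
    // A rename publishes the file in one step; readers never see partial data.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("Failed to commit " + tmp + " as " + dst);
    }
    return dst;
  }
}
```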
2024-11-16T12:47:55,789 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@595d5d73 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-886867469-172.17.0.2-1731761227469:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:41597,null,null], DatanodeInfoWithStorage[127.0.0.1:34023,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-886867469-172.17.0.2-1731761227469:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:41597,null,null], DatanodeInfoWithStorage[127.0.0.1:34023,null,null]] 2024-11-16T12:47:55,789 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data4/current/BP-886867469-172.17.0.2-1731761227469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:55,790 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:47:55,793 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@79f7fcc4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:55,793 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5fb4bc9e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:47:55,793 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:47:55,793 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@178f342a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:47:55,793 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3247fd57{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.log.dir/,STOPPED} 2024-11-16T12:47:55,800 WARN [BP-886867469-172.17.0.2-1731761227469 heartbeating to localhost/127.0.0.1:39393 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:47:55,800 WARN [BP-886867469-172.17.0.2-1731761227469 heartbeating to localhost/127.0.0.1:39393 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-886867469-172.17.0.2-1731761227469 (Datanode Uuid c07e50c5-97c3-4ced-b66a-aa7258388f1b) service to localhost/127.0.0.1:39393 2024-11-16T12:47:55,801 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data7/current/BP-886867469-172.17.0.2-1731761227469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:55,801 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/cluster_7a19b596-aa5b-5866-3868-b2473a5bf7fd/data/data8/current/BP-886867469-172.17.0.2-1731761227469 {}] 
fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:47:55,801 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T12:47:55,801 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:47:55,801 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:47:55,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4549eece{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T12:47:55,807 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21d5e4af{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:47:55,807 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:47:55,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@229a8eec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:47:55,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a9b2a9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.log.dir/,STOPPED} 2024-11-16T12:47:55,815 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T12:47:55,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T12:47:55,857 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=156 (was 81) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fb7f8bef150.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39393 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39393 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39393 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39393 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39393 from jenkins.hfs.2 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39393 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:39393 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:39393 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fb7f8bef150.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39393 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39393 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:32873 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39393 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:32873 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=448 (was 403) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=184 (was 161) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3292 (was 4398) 2024-11-16T12:47:55,865 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=156, OpenFileDescriptor=448, MaxFileDescriptor=1048576, SystemLoadAverage=184, ProcessCount=11, AvailableMemoryMB=3292 2024-11-16T12:47:55,865 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T12:47:55,866 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.log.dir so I do NOT create it in target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5 2024-11-16T12:47:55,866 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c849e70-2ba5-99e8-9593-1804ea33f1a6/hadoop.tmp.dir so I do NOT create it in target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5 2024-11-16T12:47:55,866 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad, deleteOnExit=true 2024-11-16T12:47:55,866 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T12:47:55,866 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/test.cache.data in system properties and HBase conf 2024-11-16T12:47:55,866 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T12:47:55,866 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.log.dir in system properties and HBase conf 2024-11-16T12:47:55,866 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T12:47:55,867 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T12:47:55,867 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T12:47:55,867 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T12:47:55,867 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T12:47:55,867 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T12:47:55,867 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T12:47:55,867 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T12:47:55,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T12:47:55,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T12:47:55,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T12:47:55,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T12:47:55,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T12:47:55,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/nfs.dump.dir in system properties and HBase conf 2024-11-16T12:47:55,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/java.io.tmpdir in system properties and HBase conf 2024-11-16T12:47:55,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T12:47:55,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T12:47:55,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T12:47:55,888 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T12:47:56,207 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:47:56,213 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:47:56,215 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:47:56,215 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:47:56,215 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T12:47:56,219 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:47:56,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56e801fe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:47:56,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7dee0203{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:47:56,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:47:56,335 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@27ffc774{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/java.io.tmpdir/jetty-localhost-36525-hadoop-hdfs-3_4_1-tests_jar-_-any-8492610060502390394/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T12:47:56,336 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@144c75a2{HTTP/1.1, (http/1.1)}{localhost:36525} 2024-11-16T12:47:56,336 INFO [Time-limited test {}] server.Server(415): Started @155223ms 2024-11-16T12:47:56,349 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T12:47:56,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:47:56,554 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:47:56,557 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:47:56,559 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:47:56,559 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:47:56,559 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T12:47:56,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64434c96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:47:56,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47946b20{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:47:56,663 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3c9f0fbb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/java.io.tmpdir/jetty-localhost-43657-hadoop-hdfs-3_4_1-tests_jar-_-any-6263843383159930650/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:56,664 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4958e5b2{HTTP/1.1, (http/1.1)}{localhost:43657} 2024-11-16T12:47:56,664 INFO [Time-limited test {}] server.Server(415): Started @155551ms 2024-11-16T12:47:56,665 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:47:56,693 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:47:56,698 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:47:56,698 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:47:56,698 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:47:56,699 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T12:47:56,699 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72852221{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:47:56,700 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6bf577b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:47:56,807 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@269fb75c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/java.io.tmpdir/jetty-localhost-43277-hadoop-hdfs-3_4_1-tests_jar-_-any-9996361746482843305/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:47:56,808 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@12f241e2{HTTP/1.1, (http/1.1)}{localhost:43277} 2024-11-16T12:47:56,808 INFO [Time-limited test {}] server.Server(415): Started @155695ms 2024-11-16T12:47:56,809 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:47:56,950 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T12:47:56,950 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T12:47:56,950 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T12:47:56,951 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T12:47:57,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:47:57,372 WARN [Thread-1202 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/data/data2/current/BP-1070433440-172.17.0.2-1731761275903/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:57,372 WARN [Thread-1201 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/data/data1/current/BP-1070433440-172.17.0.2-1731761275903/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:57,399 WARN [Thread-1165 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T12:47:57,402 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1536b6ce6755bb29 with lease ID 0xa345d0ec9465f530: Processing first storage report for DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1 from datanode DatanodeRegistration(127.0.0.1:45771, datanodeUuid=16dc9a8c-507c-4b32-ac6d-c66c3aa68ced, infoPort=46113, infoSecurePort=0, ipcPort=38325, storageInfo=lv=-57;cid=testClusterID;nsid=1286542615;c=1731761275903) 2024-11-16T12:47:57,402 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1536b6ce6755bb29 with lease ID 0xa345d0ec9465f530: from storage DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1 node DatanodeRegistration(127.0.0.1:45771, datanodeUuid=16dc9a8c-507c-4b32-ac6d-c66c3aa68ced, infoPort=46113, infoSecurePort=0, ipcPort=38325, storageInfo=lv=-57;cid=testClusterID;nsid=1286542615;c=1731761275903), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:57,402 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1536b6ce6755bb29 with lease ID 0xa345d0ec9465f530: Processing first storage report for DS-cc63f746-af09-442a-80bd-397284130258 from datanode DatanodeRegistration(127.0.0.1:45771, datanodeUuid=16dc9a8c-507c-4b32-ac6d-c66c3aa68ced, infoPort=46113, infoSecurePort=0, ipcPort=38325, storageInfo=lv=-57;cid=testClusterID;nsid=1286542615;c=1731761275903) 2024-11-16T12:47:57,402 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1536b6ce6755bb29 with lease ID 0xa345d0ec9465f530: from storage DS-cc63f746-af09-442a-80bd-397284130258 node DatanodeRegistration(127.0.0.1:45771, datanodeUuid=16dc9a8c-507c-4b32-ac6d-c66c3aa68ced, infoPort=46113, infoSecurePort=0, ipcPort=38325, storageInfo=lv=-57;cid=testClusterID;nsid=1286542615;c=1731761275903), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:57,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:47:57,527 WARN [Thread-1212 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/data/data3/current/BP-1070433440-172.17.0.2-1731761275903/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:57,527 WARN [Thread-1213 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/data/data4/current/BP-1070433440-172.17.0.2-1731761275903/current, will proceed with Du for space computation calculation, 2024-11-16T12:47:57,546 WARN [Thread-1188 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T12:47:57,548 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x44dd16eada5afa02 with lease ID 0xa345d0ec9465f531: Processing first storage report for DS-4d01d5cb-26c9-436f-9935-2950565c486b from datanode DatanodeRegistration(127.0.0.1:38869, datanodeUuid=601e6cc2-940d-4257-b079-927fb951f851, infoPort=33753, infoSecurePort=0, ipcPort=43919, storageInfo=lv=-57;cid=testClusterID;nsid=1286542615;c=1731761275903) 2024-11-16T12:47:57,548 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x44dd16eada5afa02 with lease ID 0xa345d0ec9465f531: from storage DS-4d01d5cb-26c9-436f-9935-2950565c486b node DatanodeRegistration(127.0.0.1:38869, datanodeUuid=601e6cc2-940d-4257-b079-927fb951f851, infoPort=33753, infoSecurePort=0, ipcPort=43919, storageInfo=lv=-57;cid=testClusterID;nsid=1286542615;c=1731761275903), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:47:57,548 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x44dd16eada5afa02 with lease ID 0xa345d0ec9465f531: Processing first storage report for DS-c2925da7-9402-41e8-b796-28a9a00de2c3 from datanode DatanodeRegistration(127.0.0.1:38869, datanodeUuid=601e6cc2-940d-4257-b079-927fb951f851, infoPort=33753, infoSecurePort=0, ipcPort=43919, storageInfo=lv=-57;cid=testClusterID;nsid=1286542615;c=1731761275903) 2024-11-16T12:47:57,548 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x44dd16eada5afa02 with lease ID 0xa345d0ec9465f531: from storage DS-c2925da7-9402-41e8-b796-28a9a00de2c3 node DatanodeRegistration(127.0.0.1:38869, datanodeUuid=601e6cc2-940d-4257-b079-927fb951f851, infoPort=33753, infoSecurePort=0, ipcPort=43919, 
storageInfo=lv=-57;cid=testClusterID;nsid=1286542615;c=1731761275903), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T12:47:57,641 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5 2024-11-16T12:47:57,644 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/zookeeper_0, clientPort=50242, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T12:47:57,645 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50242 2024-11-16T12:47:57,645 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:57,647 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:57,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741825_1001 (size=7) 2024-11-16T12:47:57,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38869 is added to blk_1073741825_1001 (size=7) 2024-11-16T12:47:57,662 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac with version=8 2024-11-16T12:47:57,662 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/hbase-staging 2024-11-16T12:47:57,665 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0450ab8807f5:0 server-side Connection retries=45 2024-11-16T12:47:57,665 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:57,665 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:57,665 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T12:47:57,665 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:57,665 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T12:47:57,665 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T12:47:57,665 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T12:47:57,666 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33647 2024-11-16T12:47:57,668 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33647 connecting to ZooKeeper ensemble=127.0.0.1:50242 2024-11-16T12:47:57,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:336470x0, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T12:47:57,717 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33647-0x10144f99d900000 connected 2024-11-16T12:47:57,783 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:57,786 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:57,789 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:47:57,789 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac, hbase.cluster.distributed=false 2024-11-16T12:47:57,791 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T12:47:57,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33647 2024-11-16T12:47:57,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33647 2024-11-16T12:47:57,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33647 2024-11-16T12:47:57,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33647 2024-11-16T12:47:57,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33647 2024-11-16T12:47:57,809 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0450ab8807f5:0 server-side Connection retries=45 2024-11-16T12:47:57,809 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:57,809 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:57,809 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T12:47:57,809 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:47:57,809 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T12:47:57,809 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T12:47:57,810 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T12:47:57,810 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34331 2024-11-16T12:47:57,812 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34331 connecting to ZooKeeper ensemble=127.0.0.1:50242 2024-11-16T12:47:57,813 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:57,814 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:57,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:343310x0, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T12:47:57,825 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34331-0x10144f99d900001 connected 2024-11-16T12:47:57,825 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:47:57,825 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T12:47:57,826 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T12:47:57,827 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T12:47:57,828 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T12:47:57,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, 
port=34331 2024-11-16T12:47:57,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34331 2024-11-16T12:47:57,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34331 2024-11-16T12:47:57,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34331 2024-11-16T12:47:57,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34331 2024-11-16T12:47:57,842 DEBUG [M:0;0450ab8807f5:33647 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0450ab8807f5:33647 2024-11-16T12:47:57,842 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0450ab8807f5,33647,1731761277664 2024-11-16T12:47:57,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:47:57,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:47:57,850 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0450ab8807f5,33647,1731761277664 2024-11-16T12:47:57,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T12:47:57,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:57,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:57,858 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T12:47:57,859 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0450ab8807f5,33647,1731761277664 from backup master directory 2024-11-16T12:47:57,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:47:57,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0450ab8807f5,33647,1731761277664 
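The entries above show ZKUtil setting watches on znodes that do not exist yet (/hbase/running, /hbase/master) and the NodeCreated/NodeDeleted events later delivered to ZKWatcher. Below is a minimal sketch of the same watch-before-create pattern with the plain Apache ZooKeeper client, pointed at the ensemble address from this log; the session timeout and class name are illustrative assumptions, not something the test uses.

```java
import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Sketch: watch a znode that does not exist yet (as ZKUtil does for /hbase/master
// above) and react when the active master eventually creates it.
public class MasterZNodeWatch {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:50242", 30_000, event -> {
      // Session-level events (the "type=None, state=SyncConnected" entries in the
      // log) arrive on this default watcher.
    });

    // exists() registers a one-shot watch even when the path is absent; the
    // watcher fires with NodeCreated once /hbase/master appears.
    zk.exists("/hbase/master", event -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        System.out.println("active master registered at " + event.getPath());
        created.countDown();
      }
    });

    created.await();
    zk.close();
  }
}
```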
2024-11-16T12:47:57,866 WARN [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T12:47:57,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:47:57,866 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0450ab8807f5,33647,1731761277664 2024-11-16T12:47:57,871 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/hbase.id] with ID: b529f6d0-a01f-468b-87e6-72f2b274a675 2024-11-16T12:47:57,871 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/.tmp/hbase.id 2024-11-16T12:47:57,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741826_1002 (size=42) 2024-11-16T12:47:57,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38869 is added to blk_1073741826_1002 (size=42) 2024-11-16T12:47:57,878 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/.tmp/hbase.id]:[hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/hbase.id] 2024-11-16T12:47:57,891 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:57,891 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T12:47:57,893 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
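The FSUtils entries above write the new cluster ID to .tmp/hbase.id and then move it to its final location. The following is a simplified sketch of that write-to-temp-then-rename pattern using the Hadoop FileSystem API, with the paths and ID value taken from the log; HBase serializes the ID differently internally, and the class name here is made up.

```java
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the "write to .tmp, then rename into place" pattern the log describes.
public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:35641");
    FileSystem fs = FileSystem.get(conf);

    Path rootDir = new Path("/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac");
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path dst = new Path(rootDir, "hbase.id");

    // 1. Write the ID to a temporary file first ...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("b529f6d0-a01f-468b-87e6-72f2b274a675".getBytes(StandardCharsets.UTF_8));
    }
    // 2. ... then rename it into place, so readers never observe a partial file.
    fs.rename(tmp, dst);
  }
}
```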
2024-11-16T12:47:57,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:57,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:57,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741827_1003 (size=196) 2024-11-16T12:47:57,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38869 is added to blk_1073741827_1003 (size=196) 2024-11-16T12:47:57,907 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T12:47:57,907 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T12:47:57,907 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:47:57,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38869 is added to blk_1073741828_1004 (size=1189) 2024-11-16T12:47:57,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741828_1004 (size=1189) 2024-11-16T12:47:57,916 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store 2024-11-16T12:47:57,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38869 is added to blk_1073741829_1005 (size=34) 2024-11-16T12:47:57,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741829_1005 (size=34) 2024-11-16T12:47:57,923 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:47:57,923 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T12:47:57,923 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:57,923 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:57,923 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T12:47:57,923 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:47:57,923 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
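The master:store descriptor printed above spells out per-family attributes (VERSIONS, IN_MEMORY, BLOCKSIZE, BLOOMFILTER, DATA_BLOCK_ENCODING). As a hedged sketch, an equivalent descriptor for an ordinary user table could be built with the public TableDescriptorBuilder API as below; 'demo:store' is a hypothetical table name, and the real master:store region is created internally by MasterRegion rather than through this client API.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: a descriptor mirroring the 'info' family attributes the log prints.
public class StoreLikeDescriptor {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                    // VERSIONS => '3'
        .setInMemory(true)                                    // IN_MEMORY => 'true'
        .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
        .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
        .build())
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc")) // defaults: 1 version, 64 KB blocks
      .build();
  }
}
```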
2024-11-16T12:47:57,923 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731761277923Disabling compacts and flushes for region at 1731761277923Disabling writes for close at 1731761277923Writing region close event to WAL at 1731761277923Closed at 1731761277923 2024-11-16T12:47:57,924 WARN [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/.initializing 2024-11-16T12:47:57,924 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/WALs/0450ab8807f5,33647,1731761277664 2024-11-16T12:47:57,927 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C33647%2C1731761277664, suffix=, logDir=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/WALs/0450ab8807f5,33647,1731761277664, archiveDir=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/oldWALs, maxLogs=10 2024-11-16T12:47:57,928 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33647%2C1731761277664.1731761277928 2024-11-16T12:47:57,936 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/WALs/0450ab8807f5,33647,1731761277664/0450ab8807f5%2C33647%2C1731761277664.1731761277928 2024-11-16T12:47:57,936 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33753:33753),(127.0.0.1/127.0.0.1:46113:46113)] 2024-11-16T12:47:57,937 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:47:57,937 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:47:57,937 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:57,937 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:57,943 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:57,944 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T12:47:57,944 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:57,945 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:57,945 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:57,946 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T12:47:57,946 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:57,947 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:47:57,947 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:57,948 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T12:47:57,948 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:57,949 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:47:57,949 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:57,951 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T12:47:57,951 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:57,951 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:47:57,952 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:57,952 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:57,953 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:57,954 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:57,954 DEBUG [master/0450ab8807f5:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:57,954 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T12:47:57,956 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:47:57,958 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:47:57,958 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=844580, jitterRate=0.07393927872180939}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T12:47:57,959 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731761277938Initializing all the Stores at 1731761277938Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761277939 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761277942 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761277942Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761277942Cleaning up temporary data from old regions at 1731761277954 (+12 ms)Region opened successfully at 1731761277959 (+5 ms) 2024-11-16T12:47:57,962 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T12:47:57,965 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@371189e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0450ab8807f5/172.17.0.2:0 2024-11-16T12:47:57,966 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T12:47:57,966 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T12:47:57,966 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T12:47:57,967 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T12:47:57,967 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T12:47:57,968 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T12:47:57,968 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T12:47:57,970 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T12:47:57,970 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T12:47:58,005 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T12:47:58,006 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T12:47:58,007 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T12:47:58,024 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T12:47:58,025 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T12:47:58,026 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T12:47:58,033 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T12:47:58,035 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T12:47:58,041 DEBUG 
[master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T12:47:58,043 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T12:47:58,049 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T12:47:58,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T12:47:58,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T12:47:58,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:58,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:58,059 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0450ab8807f5,33647,1731761277664, sessionid=0x10144f99d900000, setting cluster-up flag (Was=false) 2024-11-16T12:47:58,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:58,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:58,100 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T12:47:58,101 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0450ab8807f5,33647,1731761277664 2024-11-16T12:47:58,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:58,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:58,141 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T12:47:58,142 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0450ab8807f5,33647,1731761277664 2024-11-16T12:47:58,144 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T12:47:58,146 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T12:47:58,146 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T12:47:58,146 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T12:47:58,146 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0450ab8807f5,33647,1731761277664 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T12:47:58,148 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:47:58,148 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:47:58,148 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:47:58,148 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:47:58,148 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0450ab8807f5:0, corePoolSize=10, maxPoolSize=10 2024-11-16T12:47:58,149 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:58,149 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0450ab8807f5:0, corePoolSize=2, maxPoolSize=2 2024-11-16T12:47:58,149 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0450ab8807f5:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T12:47:58,150 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731761308150 2024-11-16T12:47:58,151 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T12:47:58,151 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T12:47:58,151 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:47:58,151 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T12:47:58,151 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T12:47:58,151 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T12:47:58,151 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T12:47:58,151 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T12:47:58,151 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:58,152 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T12:47:58,152 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T12:47:58,152 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T12:47:58,152 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T12:47:58,152 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T12:47:58,152 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:58,152 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761278152,5,FailOnTimeoutGroup] 2024-11-16T12:47:58,152 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761278152,5,FailOnTimeoutGroup] 2024-11-16T12:47:58,152 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
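The cleaner entries above schedule periodic chores (LogsCleaner every 600000 ms, HFileCleaner, and so on). Below is a plain-JDK sketch of the same periodic-chore idea; HBase uses its own ChoreService/ScheduledChore classes, so this only illustrates the scheduling pattern, and the cleaner body is a placeholder.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Sketch of the periodic "chore" pattern behind entries such as
// "Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled."
public class CleanerChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);

    Runnable logsCleaner = () -> {
      // A real log cleaner walks the oldWALs directory and deletes files whose
      // TTL has expired (TimeToLiveLogCleaner in the log above).
      System.out.println("scanning oldWALs ...");
    };

    // period=600000 ms, matching the LogsCleaner entry in the log.
    chorePool.scheduleAtFixedRate(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS);
  }
}
```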
2024-11-16T12:47:58,153 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T12:47:58,152 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T12:47:58,153 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:58,153 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
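With the hbase:meta descriptor being created above, clients will later resolve row keys to regions and servers through that table. The following is a sketch of such a lookup with the public client API, pointed at the ZooKeeper ensemble from this log; 'demo:store' is a hypothetical table used only for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: once hbase:meta is assigned, a client resolves row -> region -> server.
public class MetaLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "50242");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("demo", "store"))) {
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("some-row"));
      System.out.println("row is served by " + loc.getServerName()
          + " in region " + loc.getRegion().getRegionNameAsString());
    }
  }
}
```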
2024-11-16T12:47:58,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38869 is added to blk_1073741831_1007 (size=1321) 2024-11-16T12:47:58,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741831_1007 (size=1321) 2024-11-16T12:47:58,159 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T12:47:58,160 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac 2024-11-16T12:47:58,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741832_1008 (size=32) 2024-11-16T12:47:58,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38869 is added to blk_1073741832_1008 (size=32) 2024-11-16T12:47:58,167 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:47:58,168 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T12:47:58,170 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T12:47:58,170 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:58,170 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:58,170 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T12:47:58,172 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T12:47:58,172 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:58,172 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:58,173 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T12:47:58,174 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T12:47:58,174 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:58,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:58,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T12:47:58,176 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T12:47:58,176 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:58,177 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:58,177 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T12:47:58,178 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740 2024-11-16T12:47:58,178 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740 2024-11-16T12:47:58,180 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T12:47:58,180 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T12:47:58,181 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
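The FlushLargeStoresPolicy entry above (like the earlier 32.0 M one for master:store) falls back to the region's memstore flush size divided by the number of column families when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table. A small sketch of that arithmetic follows; the 128 MB figure is the stock default of hbase.hregion.memstore.flush.size, and the 16.0 M value for hbase:meta implies a smaller flush size configured for that region in this test (an inference, not logged directly).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the fallback: per-family flush bound = memstore flush size / #families.
public class PerFamilyFlushBound {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);

    int familiesMasterStore = 4; // info, proc, rs, state -> 128 MB / 4 = 32.0 M in the log
    System.out.println(flushSize / familiesMasterStore / (1024 * 1024) + " MB per family");
    // hbase:meta (info, ns, rep_barrier, table) reports 16.0 M here; the division
    // rule is the same, only the region's flush size differs.
  }
}
```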
2024-11-16T12:47:58,182 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T12:47:58,187 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:47:58,188 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=752240, jitterRate=-0.04347856342792511}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T12:47:58,188 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731761278167Initializing all the Stores at 1731761278168 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761278168Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761278168Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761278168Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761278168Cleaning up temporary data from old regions at 1731761278180 (+12 ms)Region opened successfully at 1731761278188 (+8 ms) 2024-11-16T12:47:58,188 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T12:47:58,188 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T12:47:58,188 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T12:47:58,188 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T12:47:58,188 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T12:47:58,189 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T12:47:58,189 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731761278188Disabling compacts and flushes for region at 1731761278188Disabling writes for close at 1731761278188Writing region 
close event to WAL at 1731761278189 (+1 ms)Closed at 1731761278189 2024-11-16T12:47:58,190 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:47:58,190 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T12:47:58,190 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T12:47:58,191 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T12:47:58,192 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T12:47:58,231 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.HRegionServer(746): ClusterId : b529f6d0-a01f-468b-87e6-72f2b274a675 2024-11-16T12:47:58,232 DEBUG [RS:0;0450ab8807f5:34331 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T12:47:58,256 DEBUG [RS:0;0450ab8807f5:34331 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T12:47:58,256 DEBUG [RS:0;0450ab8807f5:34331 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T12:47:58,267 DEBUG [RS:0;0450ab8807f5:34331 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T12:47:58,268 DEBUG [RS:0;0450ab8807f5:34331 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@af8923, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0450ab8807f5/172.17.0.2:0 2024-11-16T12:47:58,286 DEBUG [RS:0;0450ab8807f5:34331 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0450ab8807f5:34331 2024-11-16T12:47:58,286 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T12:47:58,286 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T12:47:58,286 DEBUG [RS:0;0450ab8807f5:34331 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T12:47:58,287 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.HRegionServer(2659): reportForDuty to master=0450ab8807f5,33647,1731761277664 with port=34331, startcode=1731761277809 2024-11-16T12:47:58,287 DEBUG [RS:0;0450ab8807f5:34331 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T12:47:58,289 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50841, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T12:47:58,290 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33647 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0450ab8807f5,34331,1731761277809 2024-11-16T12:47:58,290 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33647 {}] master.ServerManager(517): Registering regionserver=0450ab8807f5,34331,1731761277809 2024-11-16T12:47:58,291 DEBUG [RS:0;0450ab8807f5:34331 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac 2024-11-16T12:47:58,292 DEBUG [RS:0;0450ab8807f5:34331 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35641 2024-11-16T12:47:58,292 DEBUG [RS:0;0450ab8807f5:34331 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T12:47:58,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T12:47:58,300 DEBUG [RS:0;0450ab8807f5:34331 {}] zookeeper.ZKUtil(111): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0450ab8807f5,34331,1731761277809 2024-11-16T12:47:58,300 WARN [RS:0;0450ab8807f5:34331 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T12:47:58,300 INFO [RS:0;0450ab8807f5:34331 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:47:58,300 DEBUG [RS:0;0450ab8807f5:34331 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809 2024-11-16T12:47:58,301 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0450ab8807f5,34331,1731761277809] 2024-11-16T12:47:58,304 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T12:47:58,306 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T12:47:58,306 INFO [RS:0;0450ab8807f5:34331 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T12:47:58,306 INFO [RS:0;0450ab8807f5:34331 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-16T12:47:58,306 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T12:47:58,310 INFO [RS:0;0450ab8807f5:34331 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T12:47:58,310 INFO [RS:0;0450ab8807f5:34331 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:58,311 DEBUG [RS:0;0450ab8807f5:34331 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:58,311 DEBUG [RS:0;0450ab8807f5:34331 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:58,311 DEBUG [RS:0;0450ab8807f5:34331 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:58,311 DEBUG [RS:0;0450ab8807f5:34331 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:58,311 DEBUG [RS:0;0450ab8807f5:34331 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:58,311 DEBUG [RS:0;0450ab8807f5:34331 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0450ab8807f5:0, corePoolSize=2, maxPoolSize=2 2024-11-16T12:47:58,311 DEBUG [RS:0;0450ab8807f5:34331 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:58,311 DEBUG [RS:0;0450ab8807f5:34331 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:58,311 DEBUG [RS:0;0450ab8807f5:34331 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:58,311 DEBUG [RS:0;0450ab8807f5:34331 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:58,311 DEBUG [RS:0;0450ab8807f5:34331 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:58,311 DEBUG [RS:0;0450ab8807f5:34331 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:47:58,311 DEBUG [RS:0;0450ab8807f5:34331 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0450ab8807f5:0, corePoolSize=3, maxPoolSize=3 2024-11-16T12:47:58,312 DEBUG [RS:0;0450ab8807f5:34331 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0, corePoolSize=3, maxPoolSize=3 2024-11-16T12:47:58,313 INFO [RS:0;0450ab8807f5:34331 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-16T12:47:58,313 INFO [RS:0;0450ab8807f5:34331 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:58,313 INFO [RS:0;0450ab8807f5:34331 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:58,313 INFO [RS:0;0450ab8807f5:34331 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:58,313 INFO [RS:0;0450ab8807f5:34331 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:58,313 INFO [RS:0;0450ab8807f5:34331 {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,34331,1731761277809-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T12:47:58,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:47:58,333 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T12:47:58,333 INFO [RS:0;0450ab8807f5:34331 {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,34331,1731761277809-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-16T12:47:58,333 INFO [RS:0;0450ab8807f5:34331 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:58,333 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.Replication(171): 0450ab8807f5,34331,1731761277809 started 2024-11-16T12:47:58,343 WARN [0450ab8807f5:33647 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-16T12:47:58,350 INFO [RS:0;0450ab8807f5:34331 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:58,351 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.HRegionServer(1482): Serving as 0450ab8807f5,34331,1731761277809, RpcServer on 0450ab8807f5/172.17.0.2:34331, sessionid=0x10144f99d900001 2024-11-16T12:47:58,351 DEBUG [RS:0;0450ab8807f5:34331 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T12:47:58,351 DEBUG [RS:0;0450ab8807f5:34331 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0450ab8807f5,34331,1731761277809 2024-11-16T12:47:58,351 DEBUG [RS:0;0450ab8807f5:34331 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0450ab8807f5,34331,1731761277809' 2024-11-16T12:47:58,351 DEBUG [RS:0;0450ab8807f5:34331 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T12:47:58,352 DEBUG [RS:0;0450ab8807f5:34331 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T12:47:58,352 DEBUG [RS:0;0450ab8807f5:34331 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T12:47:58,352 DEBUG [RS:0;0450ab8807f5:34331 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T12:47:58,352 DEBUG [RS:0;0450ab8807f5:34331 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0450ab8807f5,34331,1731761277809 2024-11-16T12:47:58,352 DEBUG [RS:0;0450ab8807f5:34331 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0450ab8807f5,34331,1731761277809' 2024-11-16T12:47:58,352 DEBUG [RS:0;0450ab8807f5:34331 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T12:47:58,353 DEBUG [RS:0;0450ab8807f5:34331 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T12:47:58,353 DEBUG [RS:0;0450ab8807f5:34331 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T12:47:58,353 INFO [RS:0;0450ab8807f5:34331 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T12:47:58,353 INFO [RS:0;0450ab8807f5:34331 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-16T12:47:58,456 INFO [RS:0;0450ab8807f5:34331 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C34331%2C1731761277809, suffix=, logDir=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809, archiveDir=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/oldWALs, maxLogs=32 2024-11-16T12:47:58,457 INFO [RS:0;0450ab8807f5:34331 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C34331%2C1731761277809.1731761278457 2024-11-16T12:47:58,464 INFO [RS:0;0450ab8807f5:34331 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761278457 2024-11-16T12:47:58,465 DEBUG [RS:0;0450ab8807f5:34331 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33753:33753),(127.0.0.1/127.0.0.1:46113:46113)] 2024-11-16T12:47:58,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:47:58,593 DEBUG [0450ab8807f5:33647 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T12:47:58,594 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0450ab8807f5,34331,1731761277809 2024-11-16T12:47:58,595 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0450ab8807f5,34331,1731761277809, state=OPENING 2024-11-16T12:47:58,616 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T12:47:58,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:58,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:47:58,625 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T12:47:58,626 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:47:58,626 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:47:58,626 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0450ab8807f5,34331,1731761277809}] 2024-11-16T12:47:58,780 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T12:47:58,782 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59629, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T12:47:58,786 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T12:47:58,786 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:47:58,788 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C34331%2C1731761277809.meta, suffix=.meta, logDir=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809, archiveDir=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/oldWALs, maxLogs=32 2024-11-16T12:47:58,789 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C34331%2C1731761277809.meta.1731761278789.meta 2024-11-16T12:47:58,795 INFO 
[RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.meta.1731761278789.meta 2024-11-16T12:47:58,796 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33753:33753),(127.0.0.1/127.0.0.1:46113:46113)] 2024-11-16T12:47:58,797 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:47:58,797 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T12:47:58,797 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T12:47:58,798 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-16T12:47:58,798 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T12:47:58,798 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:47:58,798 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T12:47:58,798 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T12:47:58,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T12:47:58,800 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T12:47:58,800 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:58,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:58,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T12:47:58,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T12:47:58,802 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:58,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:58,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T12:47:58,804 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T12:47:58,804 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:58,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:58,805 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T12:47:58,806 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T12:47:58,806 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:58,806 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:47:58,806 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T12:47:58,807 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740 2024-11-16T12:47:58,809 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740 2024-11-16T12:47:58,810 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T12:47:58,810 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T12:47:58,811 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-16T12:47:58,813 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T12:47:58,814 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=709073, jitterRate=-0.09836830198764801}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T12:47:58,814 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T12:47:58,815 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731761278798Writing region info on filesystem at 1731761278798Initializing all the Stores at 1731761278799 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761278799Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761278799Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761278799Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761278799Cleaning up temporary data from old regions at 1731761278810 (+11 ms)Running coprocessor post-open hooks at 1731761278814 (+4 ms)Region opened successfully at 1731761278815 (+1 ms) 2024-11-16T12:47:58,816 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731761278780 2024-11-16T12:47:58,818 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T12:47:58,818 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T12:47:58,820 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=0450ab8807f5,34331,1731761277809 2024-11-16T12:47:58,821 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0450ab8807f5,34331,1731761277809, state=OPEN 2024-11-16T12:47:58,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T12:47:58,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T12:47:58,883 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0450ab8807f5,34331,1731761277809 2024-11-16T12:47:58,883 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:47:58,883 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:47:58,887 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T12:47:58,887 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0450ab8807f5,34331,1731761277809 in 258 msec 2024-11-16T12:47:58,890 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T12:47:58,890 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 697 msec 2024-11-16T12:47:58,891 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:47:58,891 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T12:47:58,893 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T12:47:58,893 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0450ab8807f5,34331,1731761277809, seqNum=-1] 2024-11-16T12:47:58,893 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T12:47:58,895 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58689, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T12:47:58,900 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 754 msec 2024-11-16T12:47:58,900 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731761278900, completionTime=-1 2024-11-16T12:47:58,900 INFO 
[master/0450ab8807f5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T12:47:58,900 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-16T12:47:58,902 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-16T12:47:58,902 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731761338902 2024-11-16T12:47:58,902 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731761398902 2024-11-16T12:47:58,902 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-16T12:47:58,903 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,33647,1731761277664-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:58,903 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,33647,1731761277664-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:58,903 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,33647,1731761277664-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:58,903 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0450ab8807f5:33647, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:58,903 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:58,903 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:58,905 DEBUG [master/0450ab8807f5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T12:47:58,907 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.041sec 2024-11-16T12:47:58,908 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T12:47:58,908 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T12:47:58,908 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T12:47:58,908 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-16T12:47:58,908 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T12:47:58,908 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,33647,1731761277664-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T12:47:58,908 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,33647,1731761277664-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T12:47:58,911 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T12:47:58,911 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T12:47:58,911 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,33647,1731761277664-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:47:58,932 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c83d523, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:47:58,932 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0450ab8807f5,33647,-1 for getting cluster id 2024-11-16T12:47:58,932 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T12:47:58,935 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b529f6d0-a01f-468b-87e6-72f2b274a675' 2024-11-16T12:47:58,935 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T12:47:58,936 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b529f6d0-a01f-468b-87e6-72f2b274a675" 2024-11-16T12:47:58,936 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6eec9ec9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:47:58,936 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0450ab8807f5,33647,-1] 2024-11-16T12:47:58,936 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T12:47:58,937 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:47:58,939 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33586, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T12:47:58,940 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a382d25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:47:58,941 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T12:47:58,942 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0450ab8807f5,34331,1731761277809, seqNum=-1] 2024-11-16T12:47:58,943 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T12:47:58,945 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37430, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T12:47:58,947 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0450ab8807f5,33647,1731761277664 2024-11-16T12:47:58,948 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:47:58,951 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T12:47:58,951 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-16T12:47:58,952 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-16T12:47:58,952 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T12:47:58,953 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 0450ab8807f5,33647,1731761277664 2024-11-16T12:47:58,953 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4bdf6a61 2024-11-16T12:47:58,953 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T12:47:58,955 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33590, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T12:47:58,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33647 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T12:47:58,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33647 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-16T12:47:58,956 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33647 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T12:47:58,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33647 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T12:47:58,960 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T12:47:58,960 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:58,960 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33647 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-16T12:47:58,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T12:47:58,961 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T12:47:58,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741835_1011 (size=395) 2024-11-16T12:47:58,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38869 is added to blk_1073741835_1011 (size=395) 2024-11-16T12:47:58,970 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3498cec71140f1b91f56b771484f3868, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac 2024-11-16T12:47:58,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45771 is added to blk_1073741836_1012 (size=78) 2024-11-16T12:47:58,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38869 is added to blk_1073741836_1012 (size=78) 2024-11-16T12:47:58,979 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:47:58,979 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 3498cec71140f1b91f56b771484f3868, disabling compactions & flushes 2024-11-16T12:47:58,979 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868. 2024-11-16T12:47:58,979 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868. 2024-11-16T12:47:58,979 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868. after waiting 0 ms 2024-11-16T12:47:58,979 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868. 2024-11-16T12:47:58,979 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868. 2024-11-16T12:47:58,979 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3498cec71140f1b91f56b771484f3868: Waiting for close lock at 1731761278979Disabling compacts and flushes for region at 1731761278979Disabling writes for close at 1731761278979Writing region close event to WAL at 1731761278979Closed at 1731761278979 2024-11-16T12:47:58,981 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T12:47:58,981 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731761278981"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731761278981"}]},"ts":"1731761278981"} 2024-11-16T12:47:58,984 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-16T12:47:58,985 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T12:47:58,986 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731761278985"}]},"ts":"1731761278985"} 2024-11-16T12:47:58,988 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-16T12:47:58,989 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3498cec71140f1b91f56b771484f3868, ASSIGN}] 2024-11-16T12:47:58,990 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3498cec71140f1b91f56b771484f3868, ASSIGN 2024-11-16T12:47:58,991 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3498cec71140f1b91f56b771484f3868, ASSIGN; state=OFFLINE, location=0450ab8807f5,34331,1731761277809; forceNewPlan=false, retain=false 2024-11-16T12:47:59,142 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3498cec71140f1b91f56b771484f3868, regionState=OPENING, regionLocation=0450ab8807f5,34331,1731761277809 2024-11-16T12:47:59,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3498cec71140f1b91f56b771484f3868, ASSIGN because future has completed 2024-11-16T12:47:59,145 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3498cec71140f1b91f56b771484f3868, server=0450ab8807f5,34331,1731761277809}] 2024-11-16T12:47:59,302 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868. 
2024-11-16T12:47:59,302 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 3498cec71140f1b91f56b771484f3868, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868.', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:47:59,303 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 3498cec71140f1b91f56b771484f3868 2024-11-16T12:47:59,303 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:47:59,303 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 3498cec71140f1b91f56b771484f3868 2024-11-16T12:47:59,303 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 3498cec71140f1b91f56b771484f3868 2024-11-16T12:47:59,305 INFO [StoreOpener-3498cec71140f1b91f56b771484f3868-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3498cec71140f1b91f56b771484f3868 2024-11-16T12:47:59,307 INFO [StoreOpener-3498cec71140f1b91f56b771484f3868-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3498cec71140f1b91f56b771484f3868 columnFamilyName info 2024-11-16T12:47:59,307 DEBUG [StoreOpener-3498cec71140f1b91f56b771484f3868-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:47:59,307 INFO [StoreOpener-3498cec71140f1b91f56b771484f3868-1 {}] regionserver.HStore(327): Store=3498cec71140f1b91f56b771484f3868/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:47:59,308 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 3498cec71140f1b91f56b771484f3868 2024-11-16T12:47:59,309 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/3498cec71140f1b91f56b771484f3868 2024-11-16T12:47:59,309 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/3498cec71140f1b91f56b771484f3868 2024-11-16T12:47:59,310 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 3498cec71140f1b91f56b771484f3868 2024-11-16T12:47:59,310 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 3498cec71140f1b91f56b771484f3868 2024-11-16T12:47:59,312 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 3498cec71140f1b91f56b771484f3868 2024-11-16T12:47:59,314 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/3498cec71140f1b91f56b771484f3868/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:47:59,315 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 3498cec71140f1b91f56b771484f3868; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805232, jitterRate=0.023906439542770386}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T12:47:59,315 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3498cec71140f1b91f56b771484f3868 2024-11-16T12:47:59,316 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 3498cec71140f1b91f56b771484f3868: Running coprocessor pre-open hook at 1731761279303Writing region info on filesystem at 1731761279303Initializing all the Stores at 1731761279304 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761279305 (+1 ms)Cleaning up temporary data from old regions at 1731761279310 (+5 ms)Running coprocessor post-open hooks at 1731761279315 (+5 ms)Region opened successfully at 1731761279315 2024-11-16T12:47:59,317 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868., pid=6, masterSystemTime=1731761279298 2024-11-16T12:47:59,319 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868. 2024-11-16T12:47:59,319 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868. 2024-11-16T12:47:59,320 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3498cec71140f1b91f56b771484f3868, regionState=OPEN, openSeqNum=2, regionLocation=0450ab8807f5,34331,1731761277809 2024-11-16T12:47:59,322 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3498cec71140f1b91f56b771484f3868, server=0450ab8807f5,34331,1731761277809 because future has completed 2024-11-16T12:47:59,327 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T12:47:59,327 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 3498cec71140f1b91f56b771484f3868, server=0450ab8807f5,34331,1731761277809 in 179 msec 2024-11-16T12:47:59,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:47:59,330 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T12:47:59,330 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3498cec71140f1b91f56b771484f3868, ASSIGN in 339 msec 2024-11-16T12:47:59,331 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T12:47:59,332 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731761279331"}]},"ts":"1731761279331"} 2024-11-16T12:47:59,334 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-16T12:47:59,336 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T12:47:59,338 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 380 msec 2024-11-16T12:47:59,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:00,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:00,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:01,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:01,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:02,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:02,454 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T12:48:02,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:02,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:02,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:02,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:02,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:02,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:02,484 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:02,484 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:02,485 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:02,488 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:02,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:03,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:03,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:04,304 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T12:48:04,304 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-16T12:48:04,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:04,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:05,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:05,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:06,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:06,531 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:06,950 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T12:48:06,950 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T12:48:06,951 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T12:48:06,951 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-16T12:48:06,951 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T12:48:06,951 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T12:48:06,952 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T12:48:06,952 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-16T12:48:07,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:07,532 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:48:08,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:08,532 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-16T12:48:09,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-16T12:48:09,026 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-16T12:48:09,026 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100
2024-11-16T12:48:09,031 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-16T12:48:09,031 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868.
2024-11-16T12:48:09,036 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868., hostname=0450ab8807f5,34331,1731761277809, seqNum=2]
2024-11-16T12:48:09,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:09,533 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:10,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:10,534 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:11,039 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761278457 2024-11-16T12:48:11,040 WARN [ResponseProcessor for block BP-1070433440-172.17.0.2-1731761275903:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1070433440-172.17.0.2-1731761275903:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:11,040 WARN [ResponseProcessor for block BP-1070433440-172.17.0.2-1731761275903:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1070433440-172.17.0.2-1731761275903:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:48:11,040 WARN [ResponseProcessor for block BP-1070433440-172.17.0.2-1731761275903:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1070433440-172.17.0.2-1731761275903:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:11,040 WARN [DataStreamer for file /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.meta.1731761278789.meta block BP-1070433440-172.17.0.2-1731761275903:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1070433440-172.17.0.2-1731761275903:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38869,DS-4d01d5cb-26c9-436f-9935-2950565c486b,DISK], DatanodeInfoWithStorage[127.0.0.1:45771,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38869,DS-4d01d5cb-26c9-436f-9935-2950565c486b,DISK]) is bad. 2024-11-16T12:48:11,040 WARN [DataStreamer for file /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/WALs/0450ab8807f5,33647,1731761277664/0450ab8807f5%2C33647%2C1731761277664.1731761277928 block BP-1070433440-172.17.0.2-1731761275903:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1070433440-172.17.0.2-1731761275903:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38869,DS-4d01d5cb-26c9-436f-9935-2950565c486b,DISK], DatanodeInfoWithStorage[127.0.0.1:45771,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38869,DS-4d01d5cb-26c9-436f-9935-2950565c486b,DISK]) is bad. 2024-11-16T12:48:11,041 WARN [DataStreamer for file /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761278457 block BP-1070433440-172.17.0.2-1731761275903:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1070433440-172.17.0.2-1731761275903:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38869,DS-4d01d5cb-26c9-436f-9935-2950565c486b,DISK], DatanodeInfoWithStorage[127.0.0.1:45771,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38869,DS-4d01d5cb-26c9-436f-9935-2950565c486b,DISK]) is bad. 2024-11-16T12:48:11,041 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1186575157_22 at /127.0.0.1:51122 [Receiving block BP-1070433440-172.17.0.2-1731761275903:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38869:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51122 dst: /127.0.0.1:38869 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:48:11,041 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-372225252_22 at /127.0.0.1:51070 [Receiving block BP-1070433440-172.17.0.2-1731761275903:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38869:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51070 dst: /127.0.0.1:38869 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:48:11,041 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1186575157_22 at /127.0.0.1:51114 [Receiving block BP-1070433440-172.17.0.2-1731761275903:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38869:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51114 dst: /127.0.0.1:38869 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T12:48:11,041 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1186575157_22 at /127.0.0.1:41484 [Receiving block BP-1070433440-172.17.0.2-1731761275903:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45771:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41484 dst: /127.0.0.1:45771 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:48:11,041 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-372225252_22 at /127.0.0.1:41450 [Receiving block BP-1070433440-172.17.0.2-1731761275903:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45771:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41450 dst: /127.0.0.1:45771 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T12:48:11,042 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1186575157_22 at /127.0.0.1:41482 [Receiving block BP-1070433440-172.17.0.2-1731761275903:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45771:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41482 dst: /127.0.0.1:45771 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:48:11,115 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@269fb75c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:48:11,115 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@12f241e2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:48:11,116 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:48:11,116 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6bf577b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:48:11,116 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72852221{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.log.dir/,STOPPED} 2024-11-16T12:48:11,117 WARN [BP-1070433440-172.17.0.2-1731761275903 heartbeating to localhost/127.0.0.1:35641 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:48:11,117 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T12:48:11,117 WARN [BP-1070433440-172.17.0.2-1731761275903 heartbeating to localhost/127.0.0.1:35641 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1070433440-172.17.0.2-1731761275903 (Datanode Uuid 601e6cc2-940d-4257-b079-927fb951f851) service to localhost/127.0.0.1:35641 2024-11-16T12:48:11,117 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:48:11,118 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/data/data3/current/BP-1070433440-172.17.0.2-1731761275903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:48:11,118 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/data/data4/current/BP-1070433440-172.17.0.2-1731761275903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:48:11,118 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:48:11,129 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:48:11,133 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:48:11,134 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:48:11,134 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:48:11,134 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T12:48:11,134 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49f94f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:48:11,135 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64685bd7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:48:11,230 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@652ca842{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/java.io.tmpdir/jetty-localhost-43667-hadoop-hdfs-3_4_1-tests_jar-_-any-12813831520138758111/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:48:11,230 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d802677{HTTP/1.1, 
(http/1.1)}{localhost:43667} 2024-11-16T12:48:11,230 INFO [Time-limited test {}] server.Server(415): Started @170117ms 2024-11-16T12:48:11,231 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:48:11,249 WARN [ResponseProcessor for block BP-1070433440-172.17.0.2-1731761275903:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1070433440-172.17.0.2-1731761275903:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:11,249 WARN [ResponseProcessor for block BP-1070433440-172.17.0.2-1731761275903:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1070433440-172.17.0.2-1731761275903:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:11,249 WARN [ResponseProcessor for block BP-1070433440-172.17.0.2-1731761275903:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1070433440-172.17.0.2-1731761275903:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:11,249 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1186575157_22 at /127.0.0.1:60264 [Receiving block BP-1070433440-172.17.0.2-1731761275903:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45771:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60264 dst: /127.0.0.1:45771 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:48:11,249 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1186575157_22 at /127.0.0.1:60266 [Receiving block BP-1070433440-172.17.0.2-1731761275903:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45771:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60266 dst: /127.0.0.1:45771 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:48:11,249 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-372225252_22 at /127.0.0.1:60282 [Receiving block BP-1070433440-172.17.0.2-1731761275903:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45771:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60282 dst: /127.0.0.1:45771 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T12:48:11,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3c9f0fbb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:48:11,254 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4958e5b2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:48:11,254 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:48:11,254 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47946b20{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:48:11,254 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64434c96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.log.dir/,STOPPED} 2024-11-16T12:48:11,255 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T12:48:11,255 WARN [BP-1070433440-172.17.0.2-1731761275903 heartbeating to localhost/127.0.0.1:35641 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:48:11,255 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:48:11,255 WARN [BP-1070433440-172.17.0.2-1731761275903 heartbeating to localhost/127.0.0.1:35641 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1070433440-172.17.0.2-1731761275903 (Datanode Uuid 16dc9a8c-507c-4b32-ac6d-c66c3aa68ced) service to localhost/127.0.0.1:35641 2024-11-16T12:48:11,256 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/data/data1/current/BP-1070433440-172.17.0.2-1731761275903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:48:11,256 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/data/data2/current/BP-1070433440-172.17.0.2-1731761275903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:48:11,256 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:48:11,268 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:48:11,272 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:48:11,273 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:48:11,274 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:48:11,274 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T12:48:11,275 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64e298ce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:48:11,275 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61aceb4d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:48:11,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-16T12:48:11,388 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4e62ddb8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/java.io.tmpdir/jetty-localhost-41083-hadoop-hdfs-3_4_1-tests_jar-_-any-7447593600186317954/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:48:11,389 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7790ff99{HTTP/1.1, (http/1.1)}{localhost:41083} 2024-11-16T12:48:11,389 INFO [Time-limited test {}] server.Server(415): Started @170276ms 2024-11-16T12:48:11,390 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:48:11,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:11,669 WARN [Thread-1336 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T12:48:11,671 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x71247ce1311c6cbe with lease ID 0xa345d0ec9465f532: from storage DS-4d01d5cb-26c9-436f-9935-2950565c486b node DatanodeRegistration(127.0.0.1:38907, datanodeUuid=601e6cc2-940d-4257-b079-927fb951f851, infoPort=36803, infoSecurePort=0, ipcPort=36319, storageInfo=lv=-57;cid=testClusterID;nsid=1286542615;c=1731761275903), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T12:48:11,672 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x71247ce1311c6cbe with lease ID 0xa345d0ec9465f532: from storage DS-c2925da7-9402-41e8-b796-28a9a00de2c3 node DatanodeRegistration(127.0.0.1:38907, datanodeUuid=601e6cc2-940d-4257-b079-927fb951f851, infoPort=36803, infoSecurePort=0, ipcPort=36319, storageInfo=lv=-57;cid=testClusterID;nsid=1286542615;c=1731761275903), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:48:11,835 WARN [Thread-1356 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T12:48:11,838 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5310b4a42b612bb1 with lease ID 0xa345d0ec9465f533: from storage DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1 node DatanodeRegistration(127.0.0.1:45985, datanodeUuid=16dc9a8c-507c-4b32-ac6d-c66c3aa68ced, infoPort=38371, infoSecurePort=0, ipcPort=42819, storageInfo=lv=-57;cid=testClusterID;nsid=1286542615;c=1731761275903), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:48:11,838 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5310b4a42b612bb1 with lease ID 0xa345d0ec9465f533: from storage DS-cc63f746-af09-442a-80bd-397284130258 node DatanodeRegistration(127.0.0.1:45985, datanodeUuid=16dc9a8c-507c-4b32-ac6d-c66c3aa68ced, infoPort=38371, infoSecurePort=0, ipcPort=42819, storageInfo=lv=-57;cid=testClusterID;nsid=1286542615;c=1731761275903), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:48:12,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:12,436 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-16T12:48:12,439 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-16T12:48:12,441 ERROR [FSHLog-0-hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac-prefix:0450ab8807f5,34331,1731761277809 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45771,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:12,441 WARN [FSHLog-0-hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac-prefix:0450ab8807f5,34331,1731761277809 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45771,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:48:12,441 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0450ab8807f5%2C34331%2C1731761277809:(num 1731761278457) roll requested 2024-11-16T12:48:12,441 INFO [regionserver/0450ab8807f5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C34331%2C1731761277809.1731761292441 2024-11-16T12:48:12,456 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761278457 newFile=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761292441 2024-11-16T12:48:12,456 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:12,456 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:12,456 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:12,456 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:12,457 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:12,457 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761278457 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761292441 2024-11-16T12:48:12,457 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45771,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:12,457 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45771,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:48:12,457 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761278457 2024-11-16T12:48:12,458 WARN [IPC Server handler 0 on default port 35641 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761278457 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-16T12:48:12,458 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761278457 after 1ms 2024-11-16T12:48:12,467 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38371:38371),(127.0.0.1/127.0.0.1:36803:36803)] 2024-11-16T12:48:12,467 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761278457 is not closed yet, will try archiving it next time 2024-11-16T12:48:12,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:13,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:13,536 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:14,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:14,471 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-16T12:48:14,537 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:14,673 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-16T12:48:15,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:48:15,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:16,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:16,459 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761278457 after 4002ms 2024-11-16T12:48:16,475 WARN [ResponseProcessor for block BP-1070433440-172.17.0.2-1731761275903:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1070433440-172.17.0.2-1731761275903:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:16,476 WARN [DataStreamer for file /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761292441 block BP-1070433440-172.17.0.2-1731761275903:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1070433440-172.17.0.2-1731761275903:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45985,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK], DatanodeInfoWithStorage[127.0.0.1:38907,DS-4d01d5cb-26c9-436f-9935-2950565c486b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45985,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK]) is bad. 2024-11-16T12:48:16,476 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1186575157_22 at /127.0.0.1:54690 [Receiving block BP-1070433440-172.17.0.2-1731761275903:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45985:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54690 dst: /127.0.0.1:45985 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:48:16,476 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1186575157_22 at /127.0.0.1:47440 [Receiving block BP-1070433440-172.17.0.2-1731761275903:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:38907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47440 dst: /127.0.0.1:38907 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T12:48:16,518 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4e62ddb8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:48:16,518 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7790ff99{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:48:16,518 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:48:16,518 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61aceb4d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:48:16,518 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64e298ce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.log.dir/,STOPPED} 2024-11-16T12:48:16,520 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T12:48:16,520 WARN [BP-1070433440-172.17.0.2-1731761275903 heartbeating to localhost/127.0.0.1:35641 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:48:16,520 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:48:16,520 WARN [BP-1070433440-172.17.0.2-1731761275903 heartbeating to localhost/127.0.0.1:35641 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1070433440-172.17.0.2-1731761275903 (Datanode Uuid 16dc9a8c-507c-4b32-ac6d-c66c3aa68ced) service to localhost/127.0.0.1:35641 2024-11-16T12:48:16,520 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/data/data1/current/BP-1070433440-172.17.0.2-1731761275903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:48:16,520 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/data/data2/current/BP-1070433440-172.17.0.2-1731761275903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:48:16,521 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:48:16,528 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:48:16,531 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:48:16,535 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:48:16,535 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:48:16,535 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T12:48:16,536 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21ffcd24{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:48:16,536 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16fecb8d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:48:16,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:16,646 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39ebff59{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/java.io.tmpdir/jetty-localhost-37861-hadoop-hdfs-3_4_1-tests_jar-_-any-11514524733872787610/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:48:16,646 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@160f7ba9{HTTP/1.1, (http/1.1)}{localhost:37861} 2024-11-16T12:48:16,646 INFO [Time-limited test {}] server.Server(415): Started @175533ms 2024-11-16T12:48:16,647 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:48:16,671 WARN [ResponseProcessor for block BP-1070433440-172.17.0.2-1731761275903:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1070433440-172.17.0.2-1731761275903:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:16,671 WARN [BP-1070433440-172.17.0.2-1731761275903 heartbeating to localhost/127.0.0.1:35641 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1070433440-172.17.0.2-1731761275903 (Datanode Uuid 601e6cc2-940d-4257-b079-927fb951f851) service to localhost/127.0.0.1:35641 2024-11-16T12:48:16,672 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1186575157_22 at /127.0.0.1:47448 [Receiving block BP-1070433440-172.17.0.2-1731761275903:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:38907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47448 dst: /127.0.0.1:38907 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:48:16,672 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/data/data3/current/BP-1070433440-172.17.0.2-1731761275903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:48:16,672 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/data/data4/current/BP-1070433440-172.17.0.2-1731761275903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:48:16,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@652ca842{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:48:16,674 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d802677{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:48:16,674 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:48:16,674 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64685bd7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:48:16,674 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49f94f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.log.dir/,STOPPED} 2024-11-16T12:48:16,675 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:48:16,689 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:48:16,697 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:48:16,703 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:48:16,703 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:48:16,703 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T12:48:16,703 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22fd605f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:48:16,704 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69e79afe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:48:16,806 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e33c425{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/java.io.tmpdir/jetty-localhost-39583-hadoop-hdfs-3_4_1-tests_jar-_-any-8629584704386432995/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:48:16,806 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5c7562d5{HTTP/1.1, (http/1.1)}{localhost:39583} 2024-11-16T12:48:16,806 INFO [Time-limited test {}] server.Server(415): Started @175693ms 2024-11-16T12:48:16,808 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:48:17,015 WARN [Thread-1410 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T12:48:17,017 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x787c44a8c10d9c9b with lease ID 0xa345d0ec9465f534: from storage DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1 node DatanodeRegistration(127.0.0.1:34397, datanodeUuid=16dc9a8c-507c-4b32-ac6d-c66c3aa68ced, infoPort=40459, infoSecurePort=0, ipcPort=40651, storageInfo=lv=-57;cid=testClusterID;nsid=1286542615;c=1731761275903), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T12:48:17,018 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x787c44a8c10d9c9b with lease ID 0xa345d0ec9465f534: from storage DS-cc63f746-af09-442a-80bd-397284130258 node DatanodeRegistration(127.0.0.1:34397, datanodeUuid=16dc9a8c-507c-4b32-ac6d-c66c3aa68ced, infoPort=40459, infoSecurePort=0, ipcPort=40651, storageInfo=lv=-57;cid=testClusterID;nsid=1286542615;c=1731761275903), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:48:17,196 WARN [Thread-1430 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T12:48:17,198 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9586b4e9e87faf3a with lease ID 0xa345d0ec9465f535: from storage DS-4d01d5cb-26c9-436f-9935-2950565c486b node DatanodeRegistration(127.0.0.1:37325, datanodeUuid=601e6cc2-940d-4257-b079-927fb951f851, infoPort=37869, infoSecurePort=0, ipcPort=41963, storageInfo=lv=-57;cid=testClusterID;nsid=1286542615;c=1731761275903), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:48:17,199 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9586b4e9e87faf3a with lease ID 0xa345d0ec9465f535: from storage DS-c2925da7-9402-41e8-b796-28a9a00de2c3 node DatanodeRegistration(127.0.0.1:37325, datanodeUuid=601e6cc2-940d-4257-b079-927fb951f851, infoPort=37869, infoSecurePort=0, ipcPort=41963, storageInfo=lv=-57;cid=testClusterID;nsid=1286542615;c=1731761275903), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:48:17,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:17,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:48:17,841 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-16T12:48:17,844 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-16T12:48:17,846 ERROR [FSHLog-0-hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac-prefix:0450ab8807f5,34331,1731761277809 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38907,DS-4d01d5cb-26c9-436f-9935-2950565c486b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:17,846 WARN [FSHLog-0-hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac-prefix:0450ab8807f5,34331,1731761277809 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38907,DS-4d01d5cb-26c9-436f-9935-2950565c486b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:48:17,846 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0450ab8807f5%2C34331%2C1731761277809:(num 1731761292441) roll requested 2024-11-16T12:48:17,846 INFO [regionserver/0450ab8807f5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C34331%2C1731761277809.1731761297846 2024-11-16T12:48:17,853 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761292441 newFile=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761297846 2024-11-16T12:48:17,853 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:17,853 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:17,854 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:17,854 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:17,854 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:17,854 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761292441 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761297846 2024-11-16T12:48:17,854 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38907,DS-4d01d5cb-26c9-436f-9935-2950565c486b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:17,854 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38907,DS-4d01d5cb-26c9-436f-9935-2950565c486b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:48:17,855 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761292441 2024-11-16T12:48:17,855 WARN [IPC Server handler 1 on default port 35641 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761292441 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-16T12:48:17,856 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761292441 after 0ms 2024-11-16T12:48:17,856 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40459:40459),(127.0.0.1/127.0.0.1:37869:37869)] 2024-11-16T12:48:17,856 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761292441 is not closed yet, will try archiving it next time 2024-11-16T12:48:18,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:18,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:19,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:19,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:19,857 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C34331%2C1731761277809.1731761299857 2024-11-16T12:48:19,863 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761297846 newFile=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761299857 2024-11-16T12:48:19,863 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:19,864 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:19,864 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:19,864 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:19,864 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:19,864 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761297846 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761299857 2024-11-16T12:48:19,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37325 is added to blk_1073741838_1019 (size=1264) 2024-11-16T12:48:19,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34397 is added to blk_1073741838_1019 (size=1264) 2024-11-16T12:48:19,867 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761292441 is not closed yet, will try archiving it next time 2024-11-16T12:48:19,874 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37869:37869),(127.0.0.1/127.0.0.1:40459:40459)] 2024-11-16T12:48:19,874 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761292441 is not closed yet, will try archiving it next time 2024-11-16T12:48:19,875 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761278457 2024-11-16T12:48:19,875 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761278457 2024-11-16T12:48:19,875 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761278457 after 0ms 2024-11-16T12:48:19,875 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761278457 2024-11-16T12:48:19,901 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731761279316/Put/vlen=218/seqid=0] 2024-11-16T12:48:19,901 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731761289038/Put/vlen=1045/seqid=0] 2024-11-16T12:48:19,901 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761278457 2024-11-16T12:48:19,901 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761292441 2024-11-16T12:48:19,902 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761292441 2024-11-16T12:48:19,902 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761292441 after 0ms 2024-11-16T12:48:19,902 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761292441 2024-11-16T12:48:19,906 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731761292440/Put/vlen=1045/seqid=0] 2024-11-16T12:48:19,906 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731761294473/Put/vlen=1045/seqid=0] 2024-11-16T12:48:19,906 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761292441 2024-11-16T12:48:19,906 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761297846 2024-11-16T12:48:19,906 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761297846 2024-11-16T12:48:19,907 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761297846 after 1ms 2024-11-16T12:48:19,907 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761297846 2024-11-16T12:48:19,910 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731761297845/Put/vlen=1045/seqid=0] 2024-11-16T12:48:19,910 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761299857 2024-11-16T12:48:19,910 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761299857 2024-11-16T12:48:19,911 WARN [IPC Server handler 3 on default port 35641 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761299857 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-16T12:48:19,911 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761299857 after 1ms 2024-11-16T12:48:20,199 WARN [ResponseProcessor for block BP-1070433440-172.17.0.2-1731761275903:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1070433440-172.17.0.2-1731761275903:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:20,199 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-372225252_22 at /127.0.0.1:49022 [Receiving block BP-1070433440-172.17.0.2-1731761275903:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:37325:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49022 dst: /127.0.0.1:37325 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:37325 remote=/127.0.0.1:49022]. Total timeout mills is 60000, 59664 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:48:20,199 WARN [DataStreamer for file /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761299857 block BP-1070433440-172.17.0.2-1731761275903:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1070433440-172.17.0.2-1731761275903:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37325,DS-4d01d5cb-26c9-436f-9935-2950565c486b,DISK], DatanodeInfoWithStorage[127.0.0.1:34397,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37325,DS-4d01d5cb-26c9-436f-9935-2950565c486b,DISK]) is bad. 2024-11-16T12:48:20,199 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-372225252_22 at /127.0.0.1:46214 [Receiving block BP-1070433440-172.17.0.2-1731761275903:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:34397:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46214 dst: /127.0.0.1:34397 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:48:20,203 WARN [DataStreamer for file /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761299857 block BP-1070433440-172.17.0.2-1731761275903:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1070433440-172.17.0.2-1731761275903:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:20,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37325 is added to blk_1073741839_1022 (size=85) 2024-11-16T12:48:20,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34397 is added to blk_1073741839_1022 (size=85) 2024-11-16T12:48:20,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:20,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:21,017 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-16T12:48:21,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:21,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:21,857 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761292441 after 4002ms 2024-11-16T12:48:22,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:22,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:23,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:23,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:23,912 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761299857 after 4002ms 2024-11-16T12:48:23,912 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761299857 2024-11-16T12:48:23,917 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761299857 2024-11-16T12:48:23,917 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-16T12:48:23,918 ERROR [FSHLog-0-hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac-prefix:0450ab8807f5,34331,1731761277809.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45771,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:23,918 WARN [FSHLog-0-hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac-prefix:0450ab8807f5,34331,1731761277809.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45771,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:48:23,918 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0450ab8807f5%2C34331%2C1731761277809.meta:.meta(num 1731761278789) roll requested 2024-11-16T12:48:23,918 INFO [regionserver/0450ab8807f5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C34331%2C1731761277809.meta.1731761303918.meta 2024-11-16T12:48:24,117 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 197 ms, current pipeline: null 2024-11-16T12:48:24,117 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:24,117 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:24,117 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:24,118 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:24,118 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:24,118 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.meta.1731761278789.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.meta.1731761303918.meta 2024-11-16T12:48:24,118 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45771,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:24,119 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45771,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:48:24,119 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.meta.1731761278789.meta 2024-11-16T12:48:24,119 WARN [IPC Server handler 4 on default port 35641 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.meta.1731761278789.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1013 2024-11-16T12:48:24,119 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.meta.1731761278789.meta after 0ms 2024-11-16T12:48:24,123 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40459:40459),(127.0.0.1/127.0.0.1:37869:37869)] 2024-11-16T12:48:24,123 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.meta.1731761278789.meta is not closed yet, will try archiving it next time 2024-11-16T12:48:24,144 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740/.tmp/info/c6d60473b244477a8e8d06cb1df9a043 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868./info:regioninfo/1731761279320/Put/seqid=0 2024-11-16T12:48:24,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37325 is added to blk_1073741841_1025 (size=7125) 2024-11-16T12:48:24,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34397 is added to blk_1073741841_1025 (size=7125) 2024-11-16T12:48:24,150 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740/.tmp/info/c6d60473b244477a8e8d06cb1df9a043 2024-11-16T12:48:24,172 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740/.tmp/ns/b8e8fe958e10461493e1a0592c537f01 is 43, key is default/ns:d/1731761278895/Put/seqid=0 2024-11-16T12:48:24,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37325 is added to blk_1073741842_1026 (size=5153) 2024-11-16T12:48:24,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34397 is added to blk_1073741842_1026 (size=5153) 2024-11-16T12:48:24,177 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740/.tmp/ns/b8e8fe958e10461493e1a0592c537f01 2024-11-16T12:48:24,201 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740/.tmp/table/229287129fd3490192ff6f2ed3f71454 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731761279331/Put/seqid=0 2024-11-16T12:48:24,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37325 is added to blk_1073741843_1027 (size=5438) 2024-11-16T12:48:24,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34397 is added to blk_1073741843_1027 (size=5438) 2024-11-16T12:48:24,207 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740/.tmp/table/229287129fd3490192ff6f2ed3f71454 2024-11-16T12:48:24,215 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740/.tmp/info/c6d60473b244477a8e8d06cb1df9a043 as hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740/info/c6d60473b244477a8e8d06cb1df9a043 2024-11-16T12:48:24,221 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740/info/c6d60473b244477a8e8d06cb1df9a043, entries=10, sequenceid=11, filesize=7.0 K 2024-11-16T12:48:24,222 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740/.tmp/ns/b8e8fe958e10461493e1a0592c537f01 as hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740/ns/b8e8fe958e10461493e1a0592c537f01 2024-11-16T12:48:24,229 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740/ns/b8e8fe958e10461493e1a0592c537f01, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T12:48:24,230 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740/.tmp/table/229287129fd3490192ff6f2ed3f71454 as hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740/table/229287129fd3490192ff6f2ed3f71454 2024-11-16T12:48:24,238 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740/table/229287129fd3490192ff6f2ed3f71454, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T12:48:24,239 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 322ms, sequenceid=11, compaction requested=false 2024-11-16T12:48:24,239 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T12:48:24,240 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 3498cec71140f1b91f56b771484f3868 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-16T12:48:24,240 ERROR [FSHLog-0-hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac-prefix:0450ab8807f5,34331,1731761277809 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1070433440-172.17.0.2-1731761275903:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:24,240 WARN [FSHLog-0-hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac-prefix:0450ab8807f5,34331,1731761277809 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1070433440-172.17.0.2-1731761275903:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:48:24,241 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0450ab8807f5%2C34331%2C1731761277809:(num 1731761299857) roll requested 2024-11-16T12:48:24,241 INFO [regionserver/0450ab8807f5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C34331%2C1731761277809.1731761304241 2024-11-16T12:48:24,246 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761299857 newFile=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761304241 2024-11-16T12:48:24,246 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:24,246 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:24,246 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:24,246 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:24,246 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:24,246 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761299857 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761304241 2024-11-16T12:48:24,247 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1070433440-172.17.0.2-1731761275903:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:24,247 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1070433440-172.17.0.2-1731761275903:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:24,247 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761299857 2024-11-16T12:48:24,248 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761299857 after 1ms 2024-11-16T12:48:24,257 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.1731761299857 to hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/oldWALs/0450ab8807f5%2C34331%2C1731761277809.1731761299857 2024-11-16T12:48:24,259 DEBUG [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37869:37869),(127.0.0.1/127.0.0.1:40459:40459)] 2024-11-16T12:48:24,280 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/3498cec71140f1b91f56b771484f3868/.tmp/info/f3d5807b1b6f46a2a632b92b9e2876dd is 1080, key is row1002/info:/1731761289038/Put/seqid=0 2024-11-16T12:48:24,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37325 is added to blk_1073741845_1029 (size=9270) 2024-11-16T12:48:24,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34397 is added to blk_1073741845_1029 (size=9270) 2024-11-16T12:48:24,291 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/3498cec71140f1b91f56b771484f3868/.tmp/info/f3d5807b1b6f46a2a632b92b9e2876dd 2024-11-16T12:48:24,298 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/3498cec71140f1b91f56b771484f3868/.tmp/info/f3d5807b1b6f46a2a632b92b9e2876dd as hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/3498cec71140f1b91f56b771484f3868/info/f3d5807b1b6f46a2a632b92b9e2876dd 2024-11-16T12:48:24,305 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/3498cec71140f1b91f56b771484f3868/info/f3d5807b1b6f46a2a632b92b9e2876dd, entries=4, sequenceid=8, filesize=9.1 K 2024-11-16T12:48:24,306 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 3498cec71140f1b91f56b771484f3868 in 66ms, sequenceid=8, compaction requested=false 2024-11-16T12:48:24,306 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 3498cec71140f1b91f56b771484f3868: 2024-11-16T12:48:24,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T12:48:24,312 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T12:48:24,313 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:48:24,313 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:48:24,313 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T12:48:24,313 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:48:24,313 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T12:48:24,313 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1755805376, stopped=false 2024-11-16T12:48:24,313 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0450ab8807f5,33647,1731761277664 2024-11-16T12:48:24,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T12:48:24,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T12:48:24,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:48:24,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:48:24,333 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T12:48:24,333 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T12:48:24,333 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:48:24,333 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:48:24,334 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:48:24,334 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0450ab8807f5,34331,1731761277809' ***** 2024-11-16T12:48:24,334 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T12:48:24,334 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T12:48:24,334 INFO [RS:0;0450ab8807f5:34331 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T12:48:24,334 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T12:48:24,335 INFO [RS:0;0450ab8807f5:34331 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T12:48:24,335 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.HRegionServer(3091): Received CLOSE for 3498cec71140f1b91f56b771484f3868 2024-11-16T12:48:24,335 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.HRegionServer(959): stopping server 0450ab8807f5,34331,1731761277809 2024-11-16T12:48:24,335 INFO [RS:0;0450ab8807f5:34331 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T12:48:24,335 INFO [RS:0;0450ab8807f5:34331 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0450ab8807f5:34331. 2024-11-16T12:48:24,335 DEBUG [RS:0;0450ab8807f5:34331 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:48:24,335 DEBUG [RS:0;0450ab8807f5:34331 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:48:24,335 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3498cec71140f1b91f56b771484f3868, disabling compactions & flushes 2024-11-16T12:48:24,335 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region 
TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868. 2024-11-16T12:48:24,335 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T12:48:24,335 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868. 2024-11-16T12:48:24,335 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T12:48:24,335 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T12:48:24,335 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868. after waiting 0 ms 2024-11-16T12:48:24,335 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868. 2024-11-16T12:48:24,335 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T12:48:24,336 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:48:24,336 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T12:48:24,336 DEBUG [RS:0;0450ab8807f5:34331 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 3498cec71140f1b91f56b771484f3868=TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868.} 2024-11-16T12:48:24,336 DEBUG [RS:0;0450ab8807f5:34331 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 3498cec71140f1b91f56b771484f3868 2024-11-16T12:48:24,337 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T12:48:24,337 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T12:48:24,337 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T12:48:24,337 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T12:48:24,337 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T12:48:24,341 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/3498cec71140f1b91f56b771484f3868/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-16T12:48:24,341 INFO 
[RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868. 2024-11-16T12:48:24,341 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3498cec71140f1b91f56b771484f3868: Waiting for close lock at 1731761304335Running coprocessor pre-close hooks at 1731761304335Disabling compacts and flushes for region at 1731761304335Disabling writes for close at 1731761304335Writing region close event to WAL at 1731761304336 (+1 ms)Running coprocessor post-close hooks at 1731761304341 (+5 ms)Closed at 1731761304341 2024-11-16T12:48:24,342 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731761278956.3498cec71140f1b91f56b771484f3868. 2024-11-16T12:48:24,342 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T12:48:24,343 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T12:48:24,343 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T12:48:24,343 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731761304336Running coprocessor pre-close hooks at 1731761304336Disabling compacts and flushes for region at 1731761304337 (+1 ms)Disabling writes for close at 1731761304337Writing region close event to WAL at 1731761304338 (+1 ms)Running coprocessor post-close hooks at 1731761304343 (+5 ms)Closed at 1731761304343 2024-11-16T12:48:24,343 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T12:48:24,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:24,536 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.HRegionServer(976): stopping server 0450ab8807f5,34331,1731761277809; all regions closed. 2024-11-16T12:48:24,537 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:24,537 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:24,537 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:24,537 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:24,538 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:24,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37325 is added to blk_1073741840_1023 (size=825) 2024-11-16T12:48:24,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34397 is added to blk_1073741840_1023 (size=825) 2024-11-16T12:48:24,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:25,313 INFO [regionserver/0450ab8807f5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T12:48:25,313 INFO [regionserver/0450ab8807f5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T12:48:25,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:48:25,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:26,314 INFO [regionserver/0450ab8807f5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T12:48:26,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:26,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:26,950 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T12:48:26,950 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T12:48:26,951 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T12:48:27,201 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-16T12:48:27,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:27,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:27,640 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T12:48:28,120 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.meta.1731761278789.meta after 4001ms 2024-11-16T12:48:28,121 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/WALs/0450ab8807f5,34331,1731761277809/0450ab8807f5%2C34331%2C1731761277809.meta.1731761278789.meta to hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/oldWALs/0450ab8807f5%2C34331%2C1731761277809.meta.1731761278789.meta 2024-11-16T12:48:28,126 DEBUG [RS:0;0450ab8807f5:34331 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/oldWALs 2024-11-16T12:48:28,126 INFO [RS:0;0450ab8807f5:34331 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0450ab8807f5%2C34331%2C1731761277809.meta:.meta(num 1731761303918) 2024-11-16T12:48:28,127 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:28,127 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:28,127 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:28,127 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:28,127 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:28,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34397 is added to blk_1073741844_1028 (size=1162) 2024-11-16T12:48:28,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37325 is added to blk_1073741844_1028 (size=1162) 2024-11-16T12:48:28,135 DEBUG [RS:0;0450ab8807f5:34331 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/oldWALs 2024-11-16T12:48:28,135 INFO [RS:0;0450ab8807f5:34331 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0450ab8807f5%2C34331%2C1731761277809:(num 1731761304241) 2024-11-16T12:48:28,135 DEBUG [RS:0;0450ab8807f5:34331 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:48:28,135 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T12:48:28,135 INFO [RS:0;0450ab8807f5:34331 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T12:48:28,135 INFO [RS:0;0450ab8807f5:34331 {}] hbase.ChoreService(370): Chore service for: regionserver/0450ab8807f5:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, 
ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T12:48:28,135 INFO [RS:0;0450ab8807f5:34331 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T12:48:28,135 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T12:48:28,136 INFO [RS:0;0450ab8807f5:34331 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34331 2024-11-16T12:48:28,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0450ab8807f5,34331,1731761277809 2024-11-16T12:48:28,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T12:48:28,190 INFO [RS:0;0450ab8807f5:34331 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T12:48:28,198 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0450ab8807f5,34331,1731761277809] 2024-11-16T12:48:28,207 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0450ab8807f5,34331,1731761277809 already deleted, retry=false 2024-11-16T12:48:28,207 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0450ab8807f5,34331,1731761277809 expired; onlineServers=0 2024-11-16T12:48:28,207 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0450ab8807f5,33647,1731761277664' ***** 2024-11-16T12:48:28,207 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T12:48:28,207 INFO [M:0;0450ab8807f5:33647 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T12:48:28,207 INFO [M:0;0450ab8807f5:33647 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T12:48:28,207 DEBUG [M:0;0450ab8807f5:33647 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T12:48:28,207 DEBUG [M:0;0450ab8807f5:33647 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T12:48:28,207 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T12:48:28,207 DEBUG [master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761278152 {}] cleaner.HFileCleaner(306): Exit Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761278152,5,FailOnTimeoutGroup] 2024-11-16T12:48:28,207 DEBUG [master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761278152 {}] cleaner.HFileCleaner(306): Exit Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761278152,5,FailOnTimeoutGroup] 2024-11-16T12:48:28,208 INFO [M:0;0450ab8807f5:33647 {}] hbase.ChoreService(370): Chore service for: master/0450ab8807f5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T12:48:28,208 INFO [M:0;0450ab8807f5:33647 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T12:48:28,208 DEBUG [M:0;0450ab8807f5:33647 {}] master.HMaster(1795): Stopping service threads 2024-11-16T12:48:28,208 INFO [M:0;0450ab8807f5:33647 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T12:48:28,208 INFO [M:0;0450ab8807f5:33647 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T12:48:28,208 INFO [M:0;0450ab8807f5:33647 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T12:48:28,208 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T12:48:28,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T12:48:28,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:48:28,215 DEBUG [M:0;0450ab8807f5:33647 {}] zookeeper.ZKUtil(347): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T12:48:28,216 WARN [M:0;0450ab8807f5:33647 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T12:48:28,216 INFO [M:0;0450ab8807f5:33647 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/.lastflushedseqids 2024-11-16T12:48:28,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37325 is added to blk_1073741846_1030 (size=111) 2024-11-16T12:48:28,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34397 is added to blk_1073741846_1030 (size=111) 2024-11-16T12:48:28,224 INFO [M:0;0450ab8807f5:33647 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T12:48:28,225 INFO [M:0;0450ab8807f5:33647 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T12:48:28,225 DEBUG [M:0;0450ab8807f5:33647 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T12:48:28,225 INFO [M:0;0450ab8807f5:33647 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:48:28,225 DEBUG [M:0;0450ab8807f5:33647 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:48:28,225 DEBUG [M:0;0450ab8807f5:33647 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T12:48:28,225 DEBUG [M:0;0450ab8807f5:33647 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:48:28,225 INFO [M:0;0450ab8807f5:33647 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-16T12:48:28,226 ERROR [FSHLog-0-hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData-prefix:0450ab8807f5,33647,1731761277664 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45771,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:28,226 WARN [FSHLog-0-hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData-prefix:0450ab8807f5,33647,1731761277664 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45771,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:48:28,226 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 0450ab8807f5%2C33647%2C1731761277664:(num 1731761277928) roll requested 2024-11-16T12:48:28,226 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33647%2C1731761277664.1731761308226 2024-11-16T12:48:28,232 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:28,232 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:28,232 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:28,233 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:28,233 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:28,233 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/WALs/0450ab8807f5,33647,1731761277664/0450ab8807f5%2C33647%2C1731761277664.1731761277928 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/WALs/0450ab8807f5,33647,1731761277664/0450ab8807f5%2C33647%2C1731761277664.1731761308226 2024-11-16T12:48:28,233 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45771,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T12:48:28,233 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45771,DS-2a54cb75-d9d7-4992-bf7f-7983949a16b1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T12:48:28,233 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/WALs/0450ab8807f5,33647,1731761277664/0450ab8807f5%2C33647%2C1731761277664.1731761277928 2024-11-16T12:48:28,234 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40459:40459),(127.0.0.1/127.0.0.1:37869:37869)] 2024-11-16T12:48:28,234 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/WALs/0450ab8807f5,33647,1731761277664/0450ab8807f5%2C33647%2C1731761277664.1731761277928 is not closed yet, will try archiving it next time 2024-11-16T12:48:28,234 WARN [IPC Server handler 1 on default port 35641 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/WALs/0450ab8807f5,33647,1731761277664/0450ab8807f5%2C33647%2C1731761277664.1731761277928 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-16T12:48:28,234 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/WALs/0450ab8807f5,33647,1731761277664/0450ab8807f5%2C33647%2C1731761277664.1731761277928 after 1ms 2024-11-16T12:48:28,251 DEBUG [M:0;0450ab8807f5:33647 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ef056ddb0db740c3920855a0a626f073 is 82, key is hbase:meta,,1/info:regioninfo/1731761278819/Put/seqid=0 2024-11-16T12:48:28,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34397 is added to blk_1073741848_1033 (size=5672) 2024-11-16T12:48:28,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37325 is added to blk_1073741848_1033 (size=5672) 2024-11-16T12:48:28,299 INFO [RS:0;0450ab8807f5:34331 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T12:48:28,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:48:28,299 INFO [RS:0;0450ab8807f5:34331 {}] regionserver.HRegionServer(1031): Exiting; stopping=0450ab8807f5,34331,1731761277809; zookeeper connection closed. 
2024-11-16T12:48:28,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34331-0x10144f99d900001, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:48:28,299 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4921a2f7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4921a2f7 2024-11-16T12:48:28,299 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T12:48:28,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:28,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:28,658 INFO [M:0;0450ab8807f5:33647 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ef056ddb0db740c3920855a0a626f073 2024-11-16T12:48:28,681 DEBUG [M:0;0450ab8807f5:33647 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fdfcae1bd43745528c0f859e43397482 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731761279337/Put/seqid=0 2024-11-16T12:48:28,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37325 is added to blk_1073741849_1034 (size=6118) 2024-11-16T12:48:28,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34397 is added to blk_1073741849_1034 (size=6118) 2024-11-16T12:48:28,686 INFO [M:0;0450ab8807f5:33647 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fdfcae1bd43745528c0f859e43397482 2024-11-16T12:48:28,705 DEBUG [M:0;0450ab8807f5:33647 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8293c4dbcbd841c28ca8d0c3b8fbbd05 is 69, key is 0450ab8807f5,34331,1731761277809/rs:state/1731761278290/Put/seqid=0 
2024-11-16T12:48:28,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34397 is added to blk_1073741850_1035 (size=5156) 2024-11-16T12:48:28,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37325 is added to blk_1073741850_1035 (size=5156) 2024-11-16T12:48:28,709 INFO [M:0;0450ab8807f5:33647 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8293c4dbcbd841c28ca8d0c3b8fbbd05 2024-11-16T12:48:28,729 DEBUG [M:0;0450ab8807f5:33647 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/657ac14c7d964a20ac3bdb0904d294ed is 52, key is load_balancer_on/state:d/1731761278950/Put/seqid=0 2024-11-16T12:48:28,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37325 is added to blk_1073741851_1036 (size=5056) 2024-11-16T12:48:28,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34397 is added to blk_1073741851_1036 (size=5056) 2024-11-16T12:48:28,735 INFO [M:0;0450ab8807f5:33647 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/657ac14c7d964a20ac3bdb0904d294ed 2024-11-16T12:48:28,740 DEBUG [M:0;0450ab8807f5:33647 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ef056ddb0db740c3920855a0a626f073 as hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ef056ddb0db740c3920855a0a626f073 2024-11-16T12:48:28,745 INFO [M:0;0450ab8807f5:33647 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ef056ddb0db740c3920855a0a626f073, entries=8, sequenceid=56, filesize=5.5 K 2024-11-16T12:48:28,746 DEBUG [M:0;0450ab8807f5:33647 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fdfcae1bd43745528c0f859e43397482 as hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fdfcae1bd43745528c0f859e43397482 2024-11-16T12:48:28,753 INFO [M:0;0450ab8807f5:33647 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fdfcae1bd43745528c0f859e43397482, entries=6, sequenceid=56, filesize=6.0 K 2024-11-16T12:48:28,754 DEBUG [M:0;0450ab8807f5:33647 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8293c4dbcbd841c28ca8d0c3b8fbbd05 as hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8293c4dbcbd841c28ca8d0c3b8fbbd05 2024-11-16T12:48:28,759 INFO [M:0;0450ab8807f5:33647 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8293c4dbcbd841c28ca8d0c3b8fbbd05, entries=1, sequenceid=56, filesize=5.0 K 2024-11-16T12:48:28,760 DEBUG [M:0;0450ab8807f5:33647 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/657ac14c7d964a20ac3bdb0904d294ed as hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/657ac14c7d964a20ac3bdb0904d294ed 2024-11-16T12:48:28,766 INFO [M:0;0450ab8807f5:33647 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/657ac14c7d964a20ac3bdb0904d294ed, entries=1, sequenceid=56, filesize=4.9 K 2024-11-16T12:48:28,768 INFO [M:0;0450ab8807f5:33647 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 542ms, sequenceid=56, compaction requested=false 2024-11-16T12:48:28,769 INFO [M:0;0450ab8807f5:33647 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:48:28,769 DEBUG [M:0;0450ab8807f5:33647 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731761308225Disabling compacts and flushes for region at 1731761308225Disabling writes for close at 1731761308225Obtaining lock to block concurrent updates at 1731761308225Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731761308225Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731761308226 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731761308234 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731761308234Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731761308250 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731761308250Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731761308665 (+415 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731761308680 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731761308681 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731761308691 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731761308704 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731761308704Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731761308714 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731761308729 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731761308729Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@521b5439: reopening flushed file at 1731761308740 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fb98011: reopening flushed file at 1731761308745 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@756f69cb: reopening flushed file at 1731761308753 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@fae8c7f: reopening flushed file at 1731761308760 (+7 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 542ms, sequenceid=56, compaction requested=false at 1731761308768 (+8 ms)Writing region close event to WAL at 1731761308769 (+1 ms)Closed at 1731761308769 2024-11-16T12:48:28,769 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:28,769 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:28,769 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:28,769 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:28,770 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:48:28,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37325 is added to blk_1073741847_1031 (size=757) 2024-11-16T12:48:28,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34397 is added to blk_1073741847_1031 (size=757) 2024-11-16T12:48:29,342 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,342 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:29,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,368 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,368 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,368 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,370 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,374 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,374 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:29,877 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T12:48:29,878 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,878 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,878 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,879 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,901 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,901 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,901 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:29,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:30,201 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-16T12:48:30,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:30,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:31,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:31,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:48:32,235 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/WALs/0450ab8807f5,33647,1731761277664/0450ab8807f5%2C33647%2C1731761277664.1731761277928 after 4002ms 2024-11-16T12:48:32,236 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/WALs/0450ab8807f5,33647,1731761277664/0450ab8807f5%2C33647%2C1731761277664.1731761277928 to hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/oldWALs/0450ab8807f5%2C33647%2C1731761277664.1731761277928 2024-11-16T12:48:32,240 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/MasterData/oldWALs/0450ab8807f5%2C33647%2C1731761277664.1731761277928 to hdfs://localhost:35641/user/jenkins/test-data/b8a3cc08-7b6a-23e2-0857-f08cc44939ac/oldWALs/0450ab8807f5%2C33647%2C1731761277664.1731761277928$masterlocalwal$ 2024-11-16T12:48:32,240 INFO [M:0;0450ab8807f5:33647 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T12:48:32,240 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T12:48:32,240 INFO [M:0;0450ab8807f5:33647 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33647 2024-11-16T12:48:32,241 INFO [M:0;0450ab8807f5:33647 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T12:48:32,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:32,356 INFO [M:0;0450ab8807f5:33647 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T12:48:32,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:48:32,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33647-0x10144f99d900000, quorum=127.0.0.1:50242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:48:32,393 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e33c425{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:48:32,394 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5c7562d5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:48:32,394 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:48:32,394 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69e79afe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:48:32,394 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22fd605f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.log.dir/,STOPPED} 2024-11-16T12:48:32,395 WARN [BP-1070433440-172.17.0.2-1731761275903 heartbeating to localhost/127.0.0.1:35641 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:48:32,395 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T12:48:32,395 WARN [BP-1070433440-172.17.0.2-1731761275903 heartbeating to localhost/127.0.0.1:35641 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1070433440-172.17.0.2-1731761275903 (Datanode Uuid 601e6cc2-940d-4257-b079-927fb951f851) service to localhost/127.0.0.1:35641 2024-11-16T12:48:32,395 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:48:32,396 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/data/data3/current/BP-1070433440-172.17.0.2-1731761275903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:48:32,396 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/data/data4/current/BP-1070433440-172.17.0.2-1731761275903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:48:32,396 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:48:32,399 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39ebff59{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:48:32,399 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@160f7ba9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:48:32,399 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:48:32,400 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16fecb8d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:48:32,400 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21ffcd24{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.log.dir/,STOPPED} 2024-11-16T12:48:32,401 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T12:48:32,401 WARN [BP-1070433440-172.17.0.2-1731761275903 heartbeating to localhost/127.0.0.1:35641 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:48:32,401 WARN [BP-1070433440-172.17.0.2-1731761275903 heartbeating to localhost/127.0.0.1:35641 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1070433440-172.17.0.2-1731761275903 (Datanode Uuid 16dc9a8c-507c-4b32-ac6d-c66c3aa68ced) service to localhost/127.0.0.1:35641 2024-11-16T12:48:32,401 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:48:32,402 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/data/data1/current/BP-1070433440-172.17.0.2-1731761275903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:48:32,402 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/cluster_3a137a3e-714c-a10c-73f2-ab0b29e449ad/data/data2/current/BP-1070433440-172.17.0.2-1731761275903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:48:32,402 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:48:32,408 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@27ffc774{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T12:48:32,408 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@144c75a2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:48:32,409 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:48:32,409 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7dee0203{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:48:32,409 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56e801fe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.log.dir/,STOPPED} 2024-11-16T12:48:32,417 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T12:48:32,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T12:48:32,447 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=182 (was 156) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35641 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35641 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35641 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35641 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:35641 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35641 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35641 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35641 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=457 (was 448) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=230 (was 184) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3057 (was 3292) 2024-11-16T12:48:32,455 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=182, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=230, ProcessCount=11, AvailableMemoryMB=3056 2024-11-16T12:48:32,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T12:48:32,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.log.dir so I do NOT create it in target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2 2024-11-16T12:48:32,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/38e4ecdb-024e-29a1-2b9a-2c293ee355e5/hadoop.tmp.dir so I do NOT create it in target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2 2024-11-16T12:48:32,455 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/cluster_271b2c1e-f235-9793-e49e-e20dc34e4459, deleteOnExit=true 2024-11-16T12:48:32,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T12:48:32,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/test.cache.data in system properties and HBase conf 2024-11-16T12:48:32,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T12:48:32,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/hadoop.log.dir in system properties and HBase conf 2024-11-16T12:48:32,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/mapreduce.cluster.local.dir in 
system properties and HBase conf 2024-11-16T12:48:32,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T12:48:32,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T12:48:32,456 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-16T12:48:32,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T12:48:32,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T12:48:32,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T12:48:32,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T12:48:32,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T12:48:32,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T12:48:32,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T12:48:32,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T12:48:32,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T12:48:32,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/nfs.dump.dir in system properties and HBase conf 2024-11-16T12:48:32,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/java.io.tmpdir in system properties and HBase conf 2024-11-16T12:48:32,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T12:48:32,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T12:48:32,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T12:48:32,472 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T12:48:32,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:32,712 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:48:32,717 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:48:32,718 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:48:32,718 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:48:32,718 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T12:48:32,722 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:48:32,725 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72ef9fa2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:48:32,725 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@438a440e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:48:32,834 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@61fcc471{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/java.io.tmpdir/jetty-localhost-43073-hadoop-hdfs-3_4_1-tests_jar-_-any-992653536202287749/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T12:48:32,835 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@74b5ebca{HTTP/1.1, (http/1.1)}{localhost:43073} 2024-11-16T12:48:32,835 INFO [Time-limited test {}] server.Server(415): Started @191722ms 2024-11-16T12:48:32,849 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 
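Editor's note: the WARN entries above (and the identical ones repeated later in this log) pair an InvocationTargetException that carries no message with a "Filesystem closed" IOException as its cause. That is exactly what a reflective call produces: RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed via Method.invoke, and once the test has already shut the DFS client down, the checked IOException thrown inside the call comes back wrapped. The stand-alone sketch below reproduces only that wrapping; FakeFs is a made-up stand-in, not an HDFS or HBase class.

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveWrapDemo {
        // Stand-in for DistributedFileSystem.isFileClosed on a client that is already closed.
        public static class FakeFs {
            public boolean isFileClosed(String path) throws IOException {
                throw new IOException("Filesystem closed"); // same message DFSClient.checkOpen uses
            }
        }

        public static void main(String[] args) throws Exception {
            Method m = FakeFs.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(new FakeFs(), "/some/wal");
            } catch (InvocationTargetException e) {
                // The wrapper has no detail message of its own, hence "InvocationTargetException: null"
                // in the log; the interesting part is the cause.
                System.out.println("wrapper: " + e + " (message=" + e.getMessage() + ")");
                System.out.println("cause:   " + e.getCause());
            }
        }
    }

Running it prints the message-less wrapper first and the "Filesystem closed" cause second, matching the "InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" shape seen in these entries.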
2024-11-16T12:48:33,053 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:48:33,056 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:48:33,057 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:48:33,057 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:48:33,057 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T12:48:33,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@757e4aa2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:48:33,058 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@448bc78{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:48:33,159 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5c7d522f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/java.io.tmpdir/jetty-localhost-32925-hadoop-hdfs-3_4_1-tests_jar-_-any-9483375573493789553/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:48:33,159 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@14a6d451{HTTP/1.1, (http/1.1)}{localhost:32925} 2024-11-16T12:48:33,159 INFO [Time-limited test {}] server.Server(415): Started @192046ms 2024-11-16T12:48:33,160 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:48:33,189 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:48:33,193 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:48:33,193 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:48:33,193 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:48:33,194 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T12:48:33,194 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69ea5868{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:48:33,194 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77b33594{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:48:33,302 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@339093f7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/java.io.tmpdir/jetty-localhost-35987-hadoop-hdfs-3_4_1-tests_jar-_-any-2468536476881894733/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:48:33,302 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@720d391f{HTTP/1.1, (http/1.1)}{localhost:35987} 2024-11-16T12:48:33,302 INFO [Time-limited test {}] server.Server(415): Started @192189ms 2024-11-16T12:48:33,303 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:48:33,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:33,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:48:33,831 WARN [Thread-1650 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/cluster_271b2c1e-f235-9793-e49e-e20dc34e4459/data/data1/current/BP-1518043770-172.17.0.2-1731761312484/current, will proceed with Du for space computation calculation, 2024-11-16T12:48:33,831 WARN [Thread-1651 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/cluster_271b2c1e-f235-9793-e49e-e20dc34e4459/data/data2/current/BP-1518043770-172.17.0.2-1731761312484/current, will proceed with Du for space computation calculation, 2024-11-16T12:48:33,856 WARN [Thread-1614 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T12:48:33,859 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5c2ae2cecd38583c with lease ID 0xd9585c7b46188ed2: Processing first storage report for DS-ba9de3e6-b8ca-4bc4-b3e2-8488af920e1f from datanode DatanodeRegistration(127.0.0.1:33883, datanodeUuid=6857ad7e-8171-45ee-a58a-6b344fbd5877, infoPort=34079, infoSecurePort=0, ipcPort=44357, storageInfo=lv=-57;cid=testClusterID;nsid=1825495678;c=1731761312484) 2024-11-16T12:48:33,859 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5c2ae2cecd38583c with lease ID 0xd9585c7b46188ed2: from storage DS-ba9de3e6-b8ca-4bc4-b3e2-8488af920e1f node DatanodeRegistration(127.0.0.1:33883, datanodeUuid=6857ad7e-8171-45ee-a58a-6b344fbd5877, infoPort=34079, infoSecurePort=0, ipcPort=44357, storageInfo=lv=-57;cid=testClusterID;nsid=1825495678;c=1731761312484), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:48:33,859 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5c2ae2cecd38583c with lease ID 0xd9585c7b46188ed2: Processing first storage report for DS-c90a2bf2-7f3a-42f7-8f69-00fe0dbe10d4 from datanode DatanodeRegistration(127.0.0.1:33883, datanodeUuid=6857ad7e-8171-45ee-a58a-6b344fbd5877, infoPort=34079, infoSecurePort=0, ipcPort=44357, storageInfo=lv=-57;cid=testClusterID;nsid=1825495678;c=1731761312484) 2024-11-16T12:48:33,859 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5c2ae2cecd38583c with lease ID 0xd9585c7b46188ed2: from storage DS-c90a2bf2-7f3a-42f7-8f69-00fe0dbe10d4 node DatanodeRegistration(127.0.0.1:33883, datanodeUuid=6857ad7e-8171-45ee-a58a-6b344fbd5877, infoPort=34079, infoSecurePort=0, ipcPort=44357, storageInfo=lv=-57;cid=testClusterID;nsid=1825495678;c=1731761312484), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:48:33,972 WARN [Thread-1661 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/cluster_271b2c1e-f235-9793-e49e-e20dc34e4459/data/data3/current/BP-1518043770-172.17.0.2-1731761312484/current, will proceed with Du for space computation calculation, 2024-11-16T12:48:33,972 WARN [Thread-1662 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/cluster_271b2c1e-f235-9793-e49e-e20dc34e4459/data/data4/current/BP-1518043770-172.17.0.2-1731761312484/current, will proceed with Du for space computation calculation, 2024-11-16T12:48:33,994 WARN [Thread-1637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T12:48:33,996 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa3134bf4d397f702 with lease ID 0xd9585c7b46188ed3: Processing first storage report for DS-89652e22-0ea4-407f-b31b-43fed3cec942 from datanode DatanodeRegistration(127.0.0.1:46877, datanodeUuid=46937fe2-e34e-46a3-b22f-f4bc216f0bb0, infoPort=36847, infoSecurePort=0, ipcPort=37987, storageInfo=lv=-57;cid=testClusterID;nsid=1825495678;c=1731761312484) 2024-11-16T12:48:33,996 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3134bf4d397f702 with lease ID 0xd9585c7b46188ed3: from storage DS-89652e22-0ea4-407f-b31b-43fed3cec942 node DatanodeRegistration(127.0.0.1:46877, datanodeUuid=46937fe2-e34e-46a3-b22f-f4bc216f0bb0, infoPort=36847, infoSecurePort=0, ipcPort=37987, storageInfo=lv=-57;cid=testClusterID;nsid=1825495678;c=1731761312484), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:48:33,996 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa3134bf4d397f702 with lease ID 0xd9585c7b46188ed3: Processing first storage report for DS-fc6233f8-f447-4f11-a4db-71e69f30a1e8 from datanode DatanodeRegistration(127.0.0.1:46877, datanodeUuid=46937fe2-e34e-46a3-b22f-f4bc216f0bb0, infoPort=36847, infoSecurePort=0, ipcPort=37987, storageInfo=lv=-57;cid=testClusterID;nsid=1825495678;c=1731761312484) 2024-11-16T12:48:33,997 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3134bf4d397f702 with lease ID 0xd9585c7b46188ed3: from storage DS-fc6233f8-f447-4f11-a4db-71e69f30a1e8 node DatanodeRegistration(127.0.0.1:46877, datanodeUuid=46937fe2-e34e-46a3-b22f-f4bc216f0bb0, infoPort=36847, infoSecurePort=0, ipcPort=37987, storageInfo=lv=-57;cid=testClusterID;nsid=1825495678;c=1731761312484), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:48:34,033 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2 2024-11-16T12:48:34,051 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/cluster_271b2c1e-f235-9793-e49e-e20dc34e4459/zookeeper_0, clientPort=64390, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/cluster_271b2c1e-f235-9793-e49e-e20dc34e4459/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/cluster_271b2c1e-f235-9793-e49e-e20dc34e4459/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T12:48:34,052 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64390 2024-11-16T12:48:34,052 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:48:34,055 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:48:34,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741825_1001 (size=7) 2024-11-16T12:48:34,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741825_1001 (size=7) 2024-11-16T12:48:34,067 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7 with version=8 2024-11-16T12:48:34,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/hbase-staging 2024-11-16T12:48:34,070 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0450ab8807f5:0 server-side Connection retries=45 2024-11-16T12:48:34,070 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:48:34,070 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T12:48:34,070 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T12:48:34,070 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:48:34,070 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T12:48:34,070 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T12:48:34,070 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T12:48:34,071 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36231 2024-11-16T12:48:34,073 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36231 connecting to ZooKeeper ensemble=127.0.0.1:64390 2024-11-16T12:48:34,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:362310x0, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-16T12:48:34,123 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36231-0x10144fa2bc70000 connected 2024-11-16T12:48:34,189 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:48:34,191 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:48:34,194 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:48:34,194 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7, hbase.cluster.distributed=false 2024-11-16T12:48:34,196 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T12:48:34,201 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36231 2024-11-16T12:48:34,201 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36231 2024-11-16T12:48:34,201 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36231 2024-11-16T12:48:34,202 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36231 2024-11-16T12:48:34,202 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36231 2024-11-16T12:48:34,217 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0450ab8807f5:0 server-side Connection retries=45 2024-11-16T12:48:34,217 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:48:34,217 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T12:48:34,217 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T12:48:34,217 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:48:34,218 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T12:48:34,218 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T12:48:34,218 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T12:48:34,218 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34235 2024-11-16T12:48:34,220 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34235 connecting to ZooKeeper ensemble=127.0.0.1:64390 2024-11-16T12:48:34,220 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:48:34,222 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:48:34,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:342350x0, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T12:48:34,231 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:342350x0, quorum=127.0.0.1:64390, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:48:34,231 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34235-0x10144fa2bc70001 connected 2024-11-16T12:48:34,231 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T12:48:34,232 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T12:48:34,232 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T12:48:34,233 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T12:48:34,234 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34235 2024-11-16T12:48:34,234 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34235 2024-11-16T12:48:34,234 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34235 2024-11-16T12:48:34,235 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34235 2024-11-16T12:48:34,235 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34235 2024-11-16T12:48:34,248 DEBUG [M:0;0450ab8807f5:36231 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0450ab8807f5:36231 2024-11-16T12:48:34,249 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0450ab8807f5,36231,1731761314069 2024-11-16T12:48:34,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:48:34,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:48:34,256 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0450ab8807f5,36231,1731761314069 2024-11-16T12:48:34,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:48:34,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T12:48:34,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:48:34,264 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T12:48:34,265 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0450ab8807f5,36231,1731761314069 from backup master directory 2024-11-16T12:48:34,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:48:34,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0450ab8807f5,36231,1731761314069 2024-11-16T12:48:34,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:48:34,272 WARN [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
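Editor's note: the ZKWatcher DEBUG lines above are ordinary ZooKeeper watch notifications (NodeCreated, NodeDeleted, NodeChildrenChanged) on the /hbase/master and /hbase/backup-masters znodes while the master registers itself. A minimal stand-alone client pointed at the same ensemble address would report the same three event fields. This sketch is hypothetical, it is not HBase's ZKWatcher, and it assumes the znodes and ensemble from the log are reachable; it only shows how such one-shot watches are registered.

    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeWatcher {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:64390", 30_000, event -> {
                // ZKWatcher logs exactly these fields for every notification: type, state and path.
                System.out.println("type=" + event.getType()
                    + ", state=" + event.getState()
                    + ", path=" + event.getPath());
            });
            // An exists() watch can be set on a znode that does not exist yet, mirroring
            // "Set watcher on znode that does not yet exist, /hbase/master" above.
            zk.exists("/hbase/master", true);
            // A children watch on /hbase/backup-masters fires NodeChildrenChanged whenever a
            // backup-master znode is created or deleted, as in the events above.
            zk.getChildren("/hbase/backup-masters", true);
            Thread.sleep(60_000); // keep the session open long enough to receive notifications
            zk.close();
        }
    }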
2024-11-16T12:48:34,272 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0450ab8807f5,36231,1731761314069 2024-11-16T12:48:34,276 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/hbase.id] with ID: 6f10ad65-7e8c-408c-93c2-fd5f071a5299 2024-11-16T12:48:34,276 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/.tmp/hbase.id 2024-11-16T12:48:34,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741826_1002 (size=42) 2024-11-16T12:48:34,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741826_1002 (size=42) 2024-11-16T12:48:34,283 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/.tmp/hbase.id]:[hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/hbase.id] 2024-11-16T12:48:34,297 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:48:34,297 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T12:48:34,299 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
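Editor's note: the cluster ID sequence above (write hbase.id under .tmp, then move it to its final path) is the usual write-to-a-temporary-location-then-rename pattern for publishing a file on HDFS only once it is fully written. A minimal sketch of that pattern with the Hadoop FileSystem API follows; the paths are illustrative, the real hbase.id format is HBase-internal, and only the UUID string is taken from the log. It assumes fs.defaultFS in the Configuration points at the test HDFS.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteThenRename {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);
            Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id"); // illustrative paths
            Path dst = new Path("/user/jenkins/test-data/hbase.id");
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.write("6f10ad65-7e8c-408c-93c2-fd5f071a5299".getBytes(StandardCharsets.UTF_8));
            }
            // The rename is what makes the id visible at its final path only once it is complete.
            if (!fs.rename(tmp, dst)) {
                throw new IOException("rename failed: " + tmp + " -> " + dst);
            }
        }
    }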
2024-11-16T12:48:34,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:48:34,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:48:34,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741827_1003 (size=196) 2024-11-16T12:48:34,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741827_1003 (size=196) 2024-11-16T12:48:34,313 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T12:48:34,314 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T12:48:34,315 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:48:34,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741828_1004 (size=1189) 2024-11-16T12:48:34,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741828_1004 (size=1189) 2024-11-16T12:48:34,323 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store 2024-11-16T12:48:34,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741829_1005 (size=34) 2024-11-16T12:48:34,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741829_1005 (size=34) 2024-11-16T12:48:34,331 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:48:34,331 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T12:48:34,331 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:48:34,331 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:48:34,331 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T12:48:34,331 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:48:34,331 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
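Editor's note: the master:store descriptor dumped above (an in-memory 'info' family with 3 versions, ROW_INDEX_V1 encoding, a ROWCOL bloom filter and 8 KB blocks, plus 'proc', 'rs' and 'state' families with defaults) can be expressed with the public client builders roughly as below. The master region builds this descriptor internally, so this is only a sketch of the same shape, not the code that produced the log line.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
        public static TableDescriptor build() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
                // 'info' carries the non-default settings shown in the log above.
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setInMemory(true)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setBlocksize(8 * 1024)
                    .build())
                // The remaining families use defaults (1 version, 64 KB blocks, ROW bloom).
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
                .build();
        }
    }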
2024-11-16T12:48:34,331 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731761314331Disabling compacts and flushes for region at 1731761314331Disabling writes for close at 1731761314331Writing region close event to WAL at 1731761314331Closed at 1731761314331 2024-11-16T12:48:34,332 WARN [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/.initializing 2024-11-16T12:48:34,332 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/WALs/0450ab8807f5,36231,1731761314069 2024-11-16T12:48:34,334 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C36231%2C1731761314069, suffix=, logDir=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/WALs/0450ab8807f5,36231,1731761314069, archiveDir=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/oldWALs, maxLogs=10 2024-11-16T12:48:34,335 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C36231%2C1731761314069.1731761314335 2024-11-16T12:48:34,344 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/WALs/0450ab8807f5,36231,1731761314069/0450ab8807f5%2C36231%2C1731761314069.1731761314335 2024-11-16T12:48:34,345 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36847:36847),(127.0.0.1/127.0.0.1:34079:34079)] 2024-11-16T12:48:34,345 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:48:34,345 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:48:34,345 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:48:34,346 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:48:34,347 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:48:34,348 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T12:48:34,348 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:48:34,349 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:48:34,349 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:48:34,350 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T12:48:34,350 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:48:34,351 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:48:34,351 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:48:34,352 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T12:48:34,352 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:48:34,353 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:48:34,353 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:48:34,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T12:48:34,354 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:48:34,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:48:34,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:34,355 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:48:34,356 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:48:34,356 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:48:34,357 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:48:34,358 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:48:34,359 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
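The CompactionConfiguration(183) lines above print the effective compaction tuning for each store: minCompactSize 128 MB, a file-count window of [3, 10), ratio 1.2, off-peak ratio 5.0, throttle point 2684354560 bytes, and a major compaction period of 604800000 ms with 0.5 jitter. A minimal sketch of setting those same values programmatically, assuming the standard upstream configuration keys such as hbase.hstore.compaction.min and hbase.hregion.majorcompaction (the key names come from HBase defaults, not from this log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // File-count window: [minFilesToCompact:3, maxFilesToCompact:10) in the log.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Size floor for selection: minCompactSize 128 MB (maxCompactSize left at its default).
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    // Selection ratios: 1.2 normally, 5.0 during configured off-peak hours.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    // Major compaction every 7 days (604800000 ms) with 50% jitter, as logged.
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
    System.out.println("compaction ratio = " + conf.get("hbase.hstore.compaction.ratio"));
  }
}
```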
2024-11-16T12:48:34,360 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:48:34,362 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:48:34,363 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=810893, jitterRate=0.031104937195777893}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T12:48:34,364 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731761314346Initializing all the Stores at 1731761314346Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761314346Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761314347 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761314347Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761314347Cleaning up temporary data from old regions at 1731761314358 (+11 ms)Region opened successfully at 1731761314364 (+6 ms) 2024-11-16T12:48:34,364 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T12:48:34,367 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fe6ef28, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0450ab8807f5/172.17.0.2:0 2024-11-16T12:48:34,368 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
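The region-open journal above dumps each column-family descriptor of the master local store, e.g. the 'info' family with VERSIONS 3, BLOOMFILTER ROWCOL, DATA_BLOCK_ENCODING ROW_INDEX_V1, IN_MEMORY true and an 8 KB block size. A minimal sketch of building an equivalent descriptor with the public client API; the family here is illustrative and not meant to reproduce the internal master:store table:

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class FamilyDescriptorSketch {
  public static void main(String[] args) {
    // Mirrors the 'info' family attributes printed in the open journal above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192 B (8KB)'
        .build();
    System.out.println(info);
  }
}
```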
2024-11-16T12:48:34,368 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T12:48:34,368 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T12:48:34,369 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T12:48:34,369 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T12:48:34,370 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T12:48:34,370 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T12:48:34,372 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T12:48:34,373 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T12:48:34,405 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T12:48:34,406 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T12:48:34,407 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T12:48:34,414 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T12:48:34,414 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T12:48:34,416 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T12:48:34,422 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T12:48:34,423 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T12:48:34,430 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T12:48:34,433 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T12:48:34,439 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T12:48:34,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T12:48:34,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:48:34,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T12:48:34,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:48:34,448 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0450ab8807f5,36231,1731761314069, sessionid=0x10144fa2bc70000, setting cluster-up flag (Was=false) 2024-11-16T12:48:34,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:48:34,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:48:34,489 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T12:48:34,490 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0450ab8807f5,36231,1731761314069 2024-11-16T12:48:34,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:48:34,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:48:34,530 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T12:48:34,532 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0450ab8807f5,36231,1731761314069 2024-11-16T12:48:34,533 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at 
hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T12:48:34,535 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T12:48:34,535 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T12:48:34,535 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T12:48:34,536 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0450ab8807f5,36231,1731761314069 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T12:48:34,537 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.HRegionServer(746): ClusterId : 6f10ad65-7e8c-408c-93c2-fd5f071a5299 2024-11-16T12:48:34,537 DEBUG [RS:0;0450ab8807f5:34235 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T12:48:34,538 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:48:34,538 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:48:34,538 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:48:34,538 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:48:34,538 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0450ab8807f5:0, corePoolSize=10, maxPoolSize=10 2024-11-16T12:48:34,538 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:48:34,538 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0450ab8807f5:0, corePoolSize=2, maxPoolSize=2 2024-11-16T12:48:34,538 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_TABLE_OPERATIONS-master/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:48:34,540 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:48:34,540 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T12:48:34,541 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:48:34,541 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T12:48:34,548 DEBUG [RS:0;0450ab8807f5:34235 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T12:48:34,548 DEBUG [RS:0;0450ab8807f5:34235 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T12:48:34,550 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731761344550 2024-11-16T12:48:34,551 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T12:48:34,551 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T12:48:34,551 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T12:48:34,551 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T12:48:34,551 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] 
cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T12:48:34,551 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T12:48:34,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:34,556 DEBUG [RS:0;0450ab8807f5:34235 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T12:48:34,557 DEBUG [RS:0;0450ab8807f5:34235 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3caab02d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0450ab8807f5/172.17.0.2:0 2024-11-16T12:48:34,558 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
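The RecoverLeaseFSUtils(258) warnings repeated above are benign in this context: the loop that recovers an old WAL's HDFS lease keeps probing DistributedFileSystem.isFileClosed() via reflection, and once the test's DFSClient has been torn down that probe fails with "Filesystem closed". A minimal sketch of the underlying lease-recovery pattern using direct HDFS client calls; this is not HBase's exact RecoverLeaseFSUtils code, and the NameNode URI is illustrative:

```java
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  /** Ask the NameNode to recover the lease, then poll until the file is closed. */
  static void recoverLease(DistributedFileSystem dfs, Path wal)
      throws IOException, InterruptedException {
    boolean recovered = dfs.recoverLease(wal);      // true if the lease is already released
    while (!recovered && !dfs.isFileClosed(wal)) {  // isFileClosed() is the call that failed above
      Thread.sleep(1000L);                          // back off before re-checking
      recovered = dfs.recoverLease(wal);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode address; the failed calls above targeted hdfs://localhost:39393.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000"), conf);
    recoverLease((DistributedFileSystem) fs, new Path(args[0]));
  }
}
```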
2024-11-16T12:48:34,565 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T12:48:34,565 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T12:48:34,565 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T12:48:34,571 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T12:48:34,571 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T12:48:34,571 DEBUG [RS:0;0450ab8807f5:34235 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0450ab8807f5:34235 2024-11-16T12:48:34,571 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T12:48:34,571 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T12:48:34,571 DEBUG [RS:0;0450ab8807f5:34235 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T12:48:34,571 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761314571,5,FailOnTimeoutGroup] 2024-11-16T12:48:34,571 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761314571,5,FailOnTimeoutGroup] 2024-11-16T12:48:34,571 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T12:48:34,571 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T12:48:34,571 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T12:48:34,571 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
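The CleanerChore lines above list the delegate cleaners the master will run for old WALs and HFiles (TimeToLiveLogCleaner, ReplicationLogCleaner, HFileLinkCleaner, SnapshotHFileCleaner, TimeToLiveHFileCleaner). A minimal sketch of how such delegate chains are usually declared, assuming the conventional hbase.master.logcleaner.plugins / hbase.master.hfilecleaner.plugins keys (key names are upstream conventions, not echoed by this log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CleanerPluginsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Comma-separated cleaner delegates, consulted in order for every archived WAL / HFile.
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
            + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner,"
            + "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
    // How long TimeToLiveLogCleaner keeps archived WALs before allowing deletion (10 min here).
    conf.setLong("hbase.master.logcleaner.ttl", 600_000L);
    System.out.println(conf.get("hbase.master.hfilecleaner.plugins"));
  }
}
```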
2024-11-16T12:48:34,572 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.HRegionServer(2659): reportForDuty to master=0450ab8807f5,36231,1731761314069 with port=34235, startcode=1731761314217 2024-11-16T12:48:34,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741831_1007 (size=1321) 2024-11-16T12:48:34,572 DEBUG [RS:0;0450ab8807f5:34235 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T12:48:34,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741831_1007 (size=1321) 2024-11-16T12:48:34,573 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T12:48:34,574 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7 2024-11-16T12:48:34,588 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53361, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T12:48:34,588 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36231 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0450ab8807f5,34235,1731761314217 2024-11-16T12:48:34,588 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36231 {}] master.ServerManager(517): Registering regionserver=0450ab8807f5,34235,1731761314217 2024-11-16T12:48:34,590 DEBUG [RS:0;0450ab8807f5:34235 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7 2024-11-16T12:48:34,590 DEBUG [RS:0;0450ab8807f5:34235 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33259 2024-11-16T12:48:34,590 DEBUG [RS:0;0450ab8807f5:34235 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T12:48:34,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741832_1008 (size=32) 2024-11-16T12:48:34,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741832_1008 (size=32) 2024-11-16T12:48:34,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T12:48:34,597 DEBUG [RS:0;0450ab8807f5:34235 {}] zookeeper.ZKUtil(111): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0450ab8807f5,34235,1731761314217 2024-11-16T12:48:34,597 WARN [RS:0;0450ab8807f5:34235 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T12:48:34,598 INFO [RS:0;0450ab8807f5:34235 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:48:34,598 DEBUG [RS:0;0450ab8807f5:34235 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/WALs/0450ab8807f5,34235,1731761314217 2024-11-16T12:48:34,598 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0450ab8807f5,34235,1731761314217] 2024-11-16T12:48:34,605 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T12:48:34,607 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T12:48:34,607 INFO [RS:0;0450ab8807f5:34235 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T12:48:34,607 INFO [RS:0;0450ab8807f5:34235 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:48:34,608 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T12:48:34,608 INFO [RS:0;0450ab8807f5:34235 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T12:48:34,608 INFO [RS:0;0450ab8807f5:34235 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
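The MemStoreFlusher(131) and PressureAwareCompactionThroughputController(131) lines above show the derived memstore limits (880 M global, 836 M low-water mark) and the 50–100 MB/s compaction throughput window. A minimal sketch of the knobs those values are normally derived from, assuming standard keys such as hbase.regionserver.global.memstore.size (key names again come from upstream defaults rather than this log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class MemstoreAndThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Global memstore limit as a fraction of the region server heap (default 0.4);
    // the logged 880 M is this fraction applied to the test JVM's heap.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Low-water mark as a fraction of that limit (default 0.95, giving the logged 836 M).
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // Pressure-aware compaction throughput window: 50 MB/s .. 100 MB/s, as logged.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
  }
}
```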
2024-11-16T12:48:34,609 DEBUG [RS:0;0450ab8807f5:34235 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:48:34,609 DEBUG [RS:0;0450ab8807f5:34235 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:48:34,609 DEBUG [RS:0;0450ab8807f5:34235 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:48:34,609 DEBUG [RS:0;0450ab8807f5:34235 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:48:34,609 DEBUG [RS:0;0450ab8807f5:34235 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:48:34,609 DEBUG [RS:0;0450ab8807f5:34235 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0450ab8807f5:0, corePoolSize=2, maxPoolSize=2 2024-11-16T12:48:34,609 DEBUG [RS:0;0450ab8807f5:34235 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:48:34,609 DEBUG [RS:0;0450ab8807f5:34235 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:48:34,609 DEBUG [RS:0;0450ab8807f5:34235 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:48:34,609 DEBUG [RS:0;0450ab8807f5:34235 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:48:34,609 DEBUG [RS:0;0450ab8807f5:34235 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:48:34,609 DEBUG [RS:0;0450ab8807f5:34235 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:48:34,609 DEBUG [RS:0;0450ab8807f5:34235 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0450ab8807f5:0, corePoolSize=3, maxPoolSize=3 2024-11-16T12:48:34,609 DEBUG [RS:0;0450ab8807f5:34235 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0, corePoolSize=3, maxPoolSize=3 2024-11-16T12:48:34,610 INFO [RS:0;0450ab8807f5:34235 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T12:48:34,610 INFO [RS:0;0450ab8807f5:34235 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T12:48:34,610 INFO [RS:0;0450ab8807f5:34235 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:48:34,610 INFO [RS:0;0450ab8807f5:34235 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
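Each "Chore ScheduledChore name=... is enabled" line above is a periodic task registered with the server's ChoreService (CompactionChecker and MemstoreFlusherChore every second, ExecutorStatusChore every minute, nonceCleaner every six minutes, and so on). A minimal sketch of that scheduling pattern with the same ChoreService/ScheduledChore classes; the chore name, period, and body here are purely illustrative:

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public final class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // A trivial Stoppable; real servers pass themselves so chores stop on shutdown.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // Period in milliseconds, like the "period=1000, unit=MILLISECONDS" chores above.
    ScheduledChore heartbeat = new ScheduledChore("demoChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };
    ChoreService service = new ChoreService("demo");
    service.scheduleChore(heartbeat);
    Thread.sleep(5000L);   // let the chore run a few times
    service.shutdown();
  }
}
```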
2024-11-16T12:48:34,610 INFO [RS:0;0450ab8807f5:34235 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T12:48:34,610 INFO [RS:0;0450ab8807f5:34235 {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,34235,1731761314217-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T12:48:34,625 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T12:48:34,625 INFO [RS:0;0450ab8807f5:34235 {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,34235,1731761314217-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:48:34,625 INFO [RS:0;0450ab8807f5:34235 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:48:34,626 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.Replication(171): 0450ab8807f5,34235,1731761314217 started 2024-11-16T12:48:34,641 INFO [RS:0;0450ab8807f5:34235 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:48:34,641 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.HRegionServer(1482): Serving as 0450ab8807f5,34235,1731761314217, RpcServer on 0450ab8807f5/172.17.0.2:34235, sessionid=0x10144fa2bc70001 2024-11-16T12:48:34,641 DEBUG [RS:0;0450ab8807f5:34235 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T12:48:34,641 DEBUG [RS:0;0450ab8807f5:34235 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0450ab8807f5,34235,1731761314217 2024-11-16T12:48:34,641 DEBUG [RS:0;0450ab8807f5:34235 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0450ab8807f5,34235,1731761314217' 2024-11-16T12:48:34,641 DEBUG [RS:0;0450ab8807f5:34235 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T12:48:34,642 DEBUG [RS:0;0450ab8807f5:34235 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T12:48:34,642 DEBUG [RS:0;0450ab8807f5:34235 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T12:48:34,642 DEBUG [RS:0;0450ab8807f5:34235 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T12:48:34,642 DEBUG [RS:0;0450ab8807f5:34235 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0450ab8807f5,34235,1731761314217 2024-11-16T12:48:34,642 DEBUG [RS:0;0450ab8807f5:34235 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0450ab8807f5,34235,1731761314217' 2024-11-16T12:48:34,642 DEBUG [RS:0;0450ab8807f5:34235 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T12:48:34,643 DEBUG [RS:0;0450ab8807f5:34235 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T12:48:34,643 DEBUG [RS:0;0450ab8807f5:34235 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T12:48:34,643 INFO [RS:0;0450ab8807f5:34235 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T12:48:34,643 INFO [RS:0;0450ab8807f5:34235 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-16T12:48:34,745 INFO [RS:0;0450ab8807f5:34235 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C34235%2C1731761314217, suffix=, logDir=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/WALs/0450ab8807f5,34235,1731761314217, archiveDir=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/oldWALs, maxLogs=32 2024-11-16T12:48:34,745 INFO [RS:0;0450ab8807f5:34235 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C34235%2C1731761314217.1731761314745 2024-11-16T12:48:34,751 INFO [RS:0;0450ab8807f5:34235 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/WALs/0450ab8807f5,34235,1731761314217/0450ab8807f5%2C34235%2C1731761314217.1731761314745 2024-11-16T12:48:34,752 DEBUG [RS:0;0450ab8807f5:34235 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36847:36847),(127.0.0.1/127.0.0.1:34079:34079)] 2024-11-16T12:48:34,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:48:34,995 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T12:48:34,996 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T12:48:34,996 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:48:34,997 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:48:34,997 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T12:48:34,998 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T12:48:34,998 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:48:34,999 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:48:34,999 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T12:48:35,000 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T12:48:35,000 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:48:35,001 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:48:35,001 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T12:48:35,002 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T12:48:35,002 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:48:35,002 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:48:35,002 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T12:48:35,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740 2024-11-16T12:48:35,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740 2024-11-16T12:48:35,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T12:48:35,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T12:48:35,005 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T12:48:35,006 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T12:48:35,008 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:48:35,009 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=751780, jitterRate=-0.0440630167722702}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T12:48:35,009 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731761314993Initializing all the Stores at 1731761314994 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761314994Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761314994Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761314994Instantiating store for column family {NAME => 
'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761314994Cleaning up temporary data from old regions at 1731761315005 (+11 ms)Region opened successfully at 1731761315009 (+4 ms) 2024-11-16T12:48:35,009 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T12:48:35,009 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T12:48:35,010 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T12:48:35,010 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T12:48:35,010 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T12:48:35,010 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T12:48:35,010 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731761315009Disabling compacts and flushes for region at 1731761315009Disabling writes for close at 1731761315010 (+1 ms)Writing region close event to WAL at 1731761315010Closed at 1731761315010 2024-11-16T12:48:35,011 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:48:35,011 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T12:48:35,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T12:48:35,013 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T12:48:35,014 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T12:48:35,164 DEBUG [0450ab8807f5:36231 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T12:48:35,165 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0450ab8807f5,34235,1731761314217 2024-11-16T12:48:35,166 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0450ab8807f5,34235,1731761314217, state=OPENING 2024-11-16T12:48:35,213 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T12:48:35,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-16T12:48:35,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:48:35,223 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:48:35,223 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T12:48:35,223 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:48:35,223 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0450ab8807f5,34235,1731761314217}] 2024-11-16T12:48:35,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:48:35,377 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T12:48:35,380 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57969, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T12:48:35,385 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T12:48:35,385 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:48:35,388 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C34235%2C1731761314217.meta, suffix=.meta, logDir=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/WALs/0450ab8807f5,34235,1731761314217, archiveDir=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/oldWALs, maxLogs=32 2024-11-16T12:48:35,388 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C34235%2C1731761314217.meta.1731761315388.meta 2024-11-16T12:48:35,393 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/WALs/0450ab8807f5,34235,1731761314217/0450ab8807f5%2C34235%2C1731761314217.meta.1731761315388.meta 2024-11-16T12:48:35,394 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34079:34079),(127.0.0.1/127.0.0.1:36847:36847)] 2024-11-16T12:48:35,400 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:48:35,401 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T12:48:35,401 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T12:48:35,401 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-16T12:48:35,401 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T12:48:35,401 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:48:35,401 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T12:48:35,401 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T12:48:35,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T12:48:35,403 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T12:48:35,403 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:48:35,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:48:35,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T12:48:35,405 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T12:48:35,405 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:48:35,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:48:35,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T12:48:35,406 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T12:48:35,406 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:48:35,406 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:48:35,406 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T12:48:35,407 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T12:48:35,407 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:48:35,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-16T12:48:35,407 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T12:48:35,408 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740 2024-11-16T12:48:35,409 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740 2024-11-16T12:48:35,410 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T12:48:35,410 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T12:48:35,411 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T12:48:35,412 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T12:48:35,413 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=731369, jitterRate=-0.07001666724681854}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T12:48:35,413 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T12:48:35,414 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731761315401Writing region info on filesystem at 1731761315401Initializing all the Stores at 1731761315402 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761315402Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761315402Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761315402Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761315402Cleaning up temporary data from old regions at 1731761315410 (+8 ms)Running coprocessor post-open hooks at 1731761315413 (+3 ms)Region opened successfully at 1731761315414 (+1 ms) 2024-11-16T12:48:35,415 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731761315377 2024-11-16T12:48:35,417 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T12:48:35,418 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T12:48:35,418 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0450ab8807f5,34235,1731761314217 2024-11-16T12:48:35,419 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0450ab8807f5,34235,1731761314217, state=OPEN 2024-11-16T12:48:35,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T12:48:35,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T12:48:35,472 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0450ab8807f5,34235,1731761314217 2024-11-16T12:48:35,473 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:48:35,473 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:48:35,476 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T12:48:35,476 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0450ab8807f5,34235,1731761314217 in 250 msec 2024-11-16T12:48:35,480 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T12:48:35,480 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 465 msec 2024-11-16T12:48:35,481 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:48:35,481 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T12:48:35,483 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T12:48:35,483 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0450ab8807f5,34235,1731761314217, seqNum=-1] 2024-11-16T12:48:35,483 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T12:48:35,484 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42243, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T12:48:35,491 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 956 msec 2024-11-16T12:48:35,491 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731761315491, completionTime=-1 2024-11-16T12:48:35,491 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T12:48:35,491 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-16T12:48:35,494 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-16T12:48:35,494 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731761375494 2024-11-16T12:48:35,494 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731761435494 2024-11-16T12:48:35,494 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-16T12:48:35,494 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,36231,1731761314069-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:48:35,494 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,36231,1731761314069-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:48:35,494 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,36231,1731761314069-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:48:35,494 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0450ab8807f5:36231, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T12:48:35,494 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T12:48:35,495 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T12:48:35,497 DEBUG [master/0450ab8807f5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T12:48:35,499 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.227sec 2024-11-16T12:48:35,499 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T12:48:35,499 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T12:48:35,499 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T12:48:35,499 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T12:48:35,499 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T12:48:35,499 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,36231,1731761314069-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T12:48:35,499 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,36231,1731761314069-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T12:48:35,501 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T12:48:35,502 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T12:48:35,502 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,36231,1731761314069-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T12:48:35,537 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d320e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:48:35,537 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0450ab8807f5,36231,-1 for getting cluster id 2024-11-16T12:48:35,538 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T12:48:35,540 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6f10ad65-7e8c-408c-93c2-fd5f071a5299' 2024-11-16T12:48:35,540 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T12:48:35,540 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6f10ad65-7e8c-408c-93c2-fd5f071a5299" 2024-11-16T12:48:35,540 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2020ad33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:48:35,541 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0450ab8807f5,36231,-1] 2024-11-16T12:48:35,541 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T12:48:35,541 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:48:35,542 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33626, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T12:48:35,544 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@240fc28c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:48:35,545 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T12:48:35,546 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0450ab8807f5,34235,1731761314217, seqNum=-1] 2024-11-16T12:48:35,546 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T12:48:35,548 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41206, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T12:48:35,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0450ab8807f5,36231,1731761314069 2024-11-16T12:48:35,550 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:48:35,552 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T12:48:35,553 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T12:48:35,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:48:35,554 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 0450ab8807f5,36231,1731761314069 2024-11-16T12:48:35,554 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@970e330 2024-11-16T12:48:35,554 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T12:48:35,555 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33636, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T12:48:35,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T12:48:35,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-16T12:48:35,556 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T12:48:35,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T12:48:35,559 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T12:48:35,559 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:48:35,559 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-16T12:48:35,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T12:48:35,561 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T12:48:35,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741835_1011 (size=405) 2024-11-16T12:48:35,567 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741835_1011 (size=405) 2024-11-16T12:48:35,569 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 36b4266bbc74e82fae52780a1b46b758, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7 2024-11-16T12:48:35,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741836_1012 (size=88) 2024-11-16T12:48:35,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741836_1012 (size=88) 2024-11-16T12:48:35,576 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:48:35,576 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 36b4266bbc74e82fae52780a1b46b758, disabling compactions & flushes 2024-11-16T12:48:35,576 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 2024-11-16T12:48:35,576 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 2024-11-16T12:48:35,576 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. after waiting 0 ms 2024-11-16T12:48:35,576 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 2024-11-16T12:48:35,576 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 
2024-11-16T12:48:35,576 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 36b4266bbc74e82fae52780a1b46b758: Waiting for close lock at 1731761315576Disabling compacts and flushes for region at 1731761315576Disabling writes for close at 1731761315576Writing region close event to WAL at 1731761315576Closed at 1731761315576 2024-11-16T12:48:35,577 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T12:48:35,578 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731761315578"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731761315578"}]},"ts":"1731761315578"} 2024-11-16T12:48:35,580 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-16T12:48:35,582 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T12:48:35,582 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731761315582"}]},"ts":"1731761315582"} 2024-11-16T12:48:35,584 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-16T12:48:35,584 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=36b4266bbc74e82fae52780a1b46b758, ASSIGN}] 2024-11-16T12:48:35,586 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=36b4266bbc74e82fae52780a1b46b758, ASSIGN 2024-11-16T12:48:35,587 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=36b4266bbc74e82fae52780a1b46b758, ASSIGN; state=OFFLINE, location=0450ab8807f5,34235,1731761314217; forceNewPlan=false, retain=false 2024-11-16T12:48:35,738 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=36b4266bbc74e82fae52780a1b46b758, regionState=OPENING, regionLocation=0450ab8807f5,34235,1731761314217 2024-11-16T12:48:35,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=36b4266bbc74e82fae52780a1b46b758, ASSIGN 
because future has completed 2024-11-16T12:48:35,743 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 36b4266bbc74e82fae52780a1b46b758, server=0450ab8807f5,34235,1731761314217}] 2024-11-16T12:48:35,901 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 2024-11-16T12:48:35,901 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 36b4266bbc74e82fae52780a1b46b758, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758.', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:48:35,901 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 36b4266bbc74e82fae52780a1b46b758 2024-11-16T12:48:35,901 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:48:35,901 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 36b4266bbc74e82fae52780a1b46b758 2024-11-16T12:48:35,901 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 36b4266bbc74e82fae52780a1b46b758 2024-11-16T12:48:35,903 INFO [StoreOpener-36b4266bbc74e82fae52780a1b46b758-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 36b4266bbc74e82fae52780a1b46b758 2024-11-16T12:48:35,904 INFO [StoreOpener-36b4266bbc74e82fae52780a1b46b758-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36b4266bbc74e82fae52780a1b46b758 columnFamilyName info 2024-11-16T12:48:35,904 DEBUG [StoreOpener-36b4266bbc74e82fae52780a1b46b758-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:48:35,905 INFO [StoreOpener-36b4266bbc74e82fae52780a1b46b758-1 {}] regionserver.HStore(327): Store=36b4266bbc74e82fae52780a1b46b758/info, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:48:35,905 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 36b4266bbc74e82fae52780a1b46b758 2024-11-16T12:48:35,906 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758 2024-11-16T12:48:35,906 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758 2024-11-16T12:48:35,907 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 36b4266bbc74e82fae52780a1b46b758 2024-11-16T12:48:35,907 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 36b4266bbc74e82fae52780a1b46b758 2024-11-16T12:48:35,909 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 36b4266bbc74e82fae52780a1b46b758 2024-11-16T12:48:35,912 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:48:35,913 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 36b4266bbc74e82fae52780a1b46b758; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713606, jitterRate=-0.09260410070419312}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T12:48:35,913 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 36b4266bbc74e82fae52780a1b46b758 2024-11-16T12:48:35,914 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 36b4266bbc74e82fae52780a1b46b758: Running coprocessor pre-open hook at 1731761315902Writing region info on filesystem at 1731761315902Initializing all the Stores at 1731761315902Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761315903 (+1 ms)Cleaning up temporary data from old regions at 1731761315907 (+4 ms)Running coprocessor post-open hooks at 1731761315913 (+6 ms)Region opened successfully at 1731761315914 (+1 ms) 
2024-11-16T12:48:35,915 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758., pid=6, masterSystemTime=1731761315897 2024-11-16T12:48:35,919 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 2024-11-16T12:48:35,919 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 2024-11-16T12:48:35,920 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=36b4266bbc74e82fae52780a1b46b758, regionState=OPEN, openSeqNum=2, regionLocation=0450ab8807f5,34235,1731761314217 2024-11-16T12:48:35,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 36b4266bbc74e82fae52780a1b46b758, server=0450ab8807f5,34235,1731761314217 because future has completed 2024-11-16T12:48:35,926 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T12:48:35,926 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 36b4266bbc74e82fae52780a1b46b758, server=0450ab8807f5,34235,1731761314217 in 180 msec 2024-11-16T12:48:35,929 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T12:48:35,929 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=36b4266bbc74e82fae52780a1b46b758, ASSIGN in 342 msec 2024-11-16T12:48:35,931 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T12:48:35,931 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731761315931"}]},"ts":"1731761315931"} 2024-11-16T12:48:35,933 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-16T12:48:35,935 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T12:48:35,937 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 379 msec 2024-11-16T12:48:36,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:36,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:36,950 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T12:48:36,950 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T12:48:36,951 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T12:48:36,951 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T12:48:36,951 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T12:48:36,951 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-16T12:48:37,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:37,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:38,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:38,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:39,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:39,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:40,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:40,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,403 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,430 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,431 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,431 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,432 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,432 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,432 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,438 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,438 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,439 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,442 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:40,948 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T12:48:40,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,970 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,970 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,970 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,970 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,974 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,975 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,975 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:48:40,984 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T12:48:40,984 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-16T12:48:41,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:41,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:42,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:42,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:43,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:43,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:44,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:44,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:45,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:45,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:48:45,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T12:48:45,646 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-16T12:48:45,646 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-16T12:48:45,651 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T12:48:45,651 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 2024-11-16T12:48:45,656 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758., hostname=0450ab8807f5,34235,1731761314217, seqNum=2] 2024-11-16T12:48:45,664 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T12:48:45,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T12:48:45,671 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-16T12:48:45,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-16T12:48:45,672 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-16T12:48:45,674 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-16T12:48:45,837 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34235 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-16T12:48:45,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 
2024-11-16T12:48:45,838 INFO [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 36b4266bbc74e82fae52780a1b46b758 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T12:48:45,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp/info/3455d8cafcdd418f842f91846775842d is 1080, key is row0001/info:/1731761325657/Put/seqid=0 2024-11-16T12:48:45,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741837_1013 (size=6033) 2024-11-16T12:48:45,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741837_1013 (size=6033) 2024-11-16T12:48:45,869 INFO [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp/info/3455d8cafcdd418f842f91846775842d 2024-11-16T12:48:45,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp/info/3455d8cafcdd418f842f91846775842d as hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/3455d8cafcdd418f842f91846775842d 2024-11-16T12:48:45,882 INFO [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/3455d8cafcdd418f842f91846775842d, entries=1, sequenceid=5, filesize=5.9 K 2024-11-16T12:48:45,883 INFO [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 36b4266bbc74e82fae52780a1b46b758 in 45ms, sequenceid=5, compaction requested=false 2024-11-16T12:48:45,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 36b4266bbc74e82fae52780a1b46b758: 2024-11-16T12:48:45,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 
2024-11-16T12:48:45,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-16T12:48:45,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-16T12:48:45,892 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-16T12:48:45,892 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 214 msec 2024-11-16T12:48:45,895 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 227 msec 2024-11-16T12:48:46,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:48:46,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:47,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:47,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:48:48,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:48,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:49,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:48:49,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:50,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:50,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:48:51,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:51,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:52,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:48:52,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:53,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:53,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:48:54,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:54,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:55,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:48:55,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-16T12:48:55,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-16T12:48:55,716 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-16T12:48:55,721 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T12:48:55,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T12:48:55,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-16T12:48:55,725 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-16T12:48:55,727 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-16T12:48:55,727 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-16T12:48:55,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34235 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-16T12:48:55,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758.
2024-11-16T12:48:55,882 INFO [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 36b4266bbc74e82fae52780a1b46b758 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-16T12:48:55,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp/info/c9cada6527854d758cf215cc80444404 is 1080, key is row0002/info:/1731761335718/Put/seqid=0
2024-11-16T12:48:55,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741838_1014 (size=6033)
2024-11-16T12:48:55,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741838_1014 (size=6033)
2024-11-16T12:48:55,898 INFO [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp/info/c9cada6527854d758cf215cc80444404
2024-11-16T12:48:55,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp/info/c9cada6527854d758cf215cc80444404 as hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/c9cada6527854d758cf215cc80444404
2024-11-16T12:48:55,910 INFO [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/c9cada6527854d758cf215cc80444404, entries=1, sequenceid=9, filesize=5.9 K
2024-11-16T12:48:55,911 INFO [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 36b4266bbc74e82fae52780a1b46b758 in 30ms, sequenceid=9, compaction requested=false
2024-11-16T12:48:55,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 36b4266bbc74e82fae52780a1b46b758:
2024-11-16T12:48:55,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758.
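The records above trace one complete table flush: the client's FLUSH operation completes against the master, the master stores FlushTableProcedure pid=9 and fans out FlushRegionProcedure pid=10, and the region server writes the 1.05 KB memstore to a temporary HFile under .tmp/ before committing it into the info family. A minimal sketch of how such a flush is typically requested from client code via the HBase Admin API, assuming a locally configured Connection (class and variable names here are illustrative, not taken from the test source):

```java
// Minimal sketch of requesting the table flush seen above via the HBase Admin API.
// Configuration and error handling are illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous flush: the master stores a FlushTableProcedure, fans out a
      // FlushRegionProcedure per region, and each region server writes its
      // memstore to a temporary HFile before committing it to the store.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}
```

Admin.flush blocks until the underlying procedure finishes, which appears to be why the master log shows the client repeatedly "Checking to see if procedure is done pid=9" before the FLUSH operation is reported as completed.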
2024-11-16T12:48:55,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-16T12:48:55,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-16T12:48:55,915 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-16T12:48:55,915 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec 2024-11-16T12:48:55,918 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 194 msec 2024-11-16T12:48:56,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:48:56,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 after 68050ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T12:48:56,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:56,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta after 68051ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
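The WARN entries repeating above (util.RecoverLeaseFSUtils(258) roughly once per second per WAL file, plus the attempt=2 messages after 68050ms and 68051ms) come from the WAL close path: AbstractFSWAL.recoverLease asks HDFS to recover the lease on the old writer's file and then polls whether the file is closed, but every reflective call fails with java.io.IOException: Filesystem closed because the DFSClient for hdfs://localhost:39393 has already been shut down. A simplified sketch of that recover-then-poll pattern against the public DistributedFileSystem API follows; it illustrates the pattern only, not the actual RecoverLeaseFSUtils implementation, and the timeout and sleep values are assumptions:

```java
// Simplified sketch (not the actual RecoverLeaseFSUtils code) of the
// recover-then-poll pattern visible in the stack traces above.
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  /** Try to recover the lease on a WAL file, polling until it is closed or a timeout expires. */
  static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;   // overall timeout is an assumption
    while (System.currentTimeMillis() < deadline) {
      try {
        // Ask the NameNode to recover the lease; true means the file is already closed.
        if (dfs.recoverLease(wal)) {
          return true;
        }
        // Recovery can complete asynchronously, so also poll the closed state.
        if (dfs.isFileClosed(wal)) {
          return true;
        }
      } catch (IOException e) {
        // e.g. "Filesystem closed" once the DFSClient has been shut down;
        // the utility in the log warns and keeps retrying until its timeout.
      }
      Thread.sleep(1000);   // the log shows retries roughly once per second
    }
    return false;
  }
}
```

Because the exception is thrown on every attempt, the same stack trace recurs for both WAL files (the region server WAL and the .meta WAL) until the retry loop gives up.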
2024-11-16T12:48:57,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:48:57,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-16T12:49:04,033 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-16T12:49:05,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:05,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-16T12:49:05,796 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-16T12:49:05,799 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C34235%2C1731761314217.1731761345799 2024-11-16T12:49:05,805 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:05,805 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:05,805 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:05,805 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:05,805 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:05,805 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/WALs/0450ab8807f5,34235,1731761314217/0450ab8807f5%2C34235%2C1731761314217.1731761314745 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/WALs/0450ab8807f5,34235,1731761314217/0450ab8807f5%2C34235%2C1731761314217.1731761345799 2024-11-16T12:49:05,806 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34079:34079),(127.0.0.1/127.0.0.1:36847:36847)] 2024-11-16T12:49:05,806 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/WALs/0450ab8807f5,34235,1731761314217/0450ab8807f5%2C34235%2C1731761314217.1731761314745 is not closed yet, will try archiving it next time 2024-11-16T12:49:05,807 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T12:49:05,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741833_1009 (size=5546) 2024-11-16T12:49:05,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741833_1009 (size=5546) 2024-11-16T12:49:05,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T12:49:05,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-16T12:49:05,811 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-16T12:49:05,812 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-16T12:49:05,812 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-16T12:49:05,966 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34235 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-16T12:49:05,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 2024-11-16T12:49:05,967 INFO [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 36b4266bbc74e82fae52780a1b46b758 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T12:49:05,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp/info/fc415e1cf8c049f0a2135bc0d8878a2d is 1080, key is row0003/info:/1731761345797/Put/seqid=0 2024-11-16T12:49:05,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741840_1016 (size=6033) 2024-11-16T12:49:05,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741840_1016 (size=6033) 2024-11-16T12:49:06,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:06,386 INFO [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp/info/fc415e1cf8c049f0a2135bc0d8878a2d 2024-11-16T12:49:06,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp/info/fc415e1cf8c049f0a2135bc0d8878a2d as hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/fc415e1cf8c049f0a2135bc0d8878a2d 2024-11-16T12:49:06,401 INFO [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/fc415e1cf8c049f0a2135bc0d8878a2d, entries=1, sequenceid=13, filesize=5.9 K 2024-11-16T12:49:06,402 INFO [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 36b4266bbc74e82fae52780a1b46b758 in 435ms, sequenceid=13, compaction requested=true 2024-11-16T12:49:06,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 36b4266bbc74e82fae52780a1b46b758: 2024-11-16T12:49:06,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 
2024-11-16T12:49:06,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-16T12:49:06,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-16T12:49:06,408 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-16T12:49:06,408 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 592 msec 2024-11-16T12:49:06,411 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 601 msec 2024-11-16T12:49:06,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-16T12:49:09,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-16T12:49:10,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:10,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:11,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:11,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:12,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:12,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:13,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:13,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:14,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:14,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:15,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:15,507 INFO [master/0450ab8807f5:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T12:49:15,508 INFO [master/0450ab8807f5:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-16T12:49:15,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:15,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-16T12:49:15,896 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-16T12:49:15,896 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T12:49:15,898 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T12:49:15,898 DEBUG [Time-limited test {}] regionserver.HStore(1541): 36b4266bbc74e82fae52780a1b46b758/info is initiating minor compaction (all files) 2024-11-16T12:49:15,898 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T12:49:15,899 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:15,899 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 36b4266bbc74e82fae52780a1b46b758/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 
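Note: the entry above records ExploringCompactionPolicy picking 3 eligible store files totalling 18099 bytes for a minor compaction. As a minimal sketch of that kind of ratio-based selection (assuming simplified behavior; this is not HBase's actual ExploringCompactionPolicy, and the class, method names and parameters below are hypothetical):

import java.util.List;

public class RatioCompactionSketch {

    /**
     * Pick the longest contiguous run of files (oldest first) in which every file
     * is at most `ratio` times the combined size of the other files in the run.
     * Returns {startIndex, endIndexExclusive} into `sizes`, or null if no run of
     * at least two files qualifies.
     */
    static int[] select(List<Long> sizes, double ratio, int maxFiles) {
        int bestStart = -1, bestLen = 0;
        for (int start = 0; start < sizes.size(); start++) {
            int maxEnd = Math.min(sizes.size(), start + maxFiles);
            for (int end = start + 2; end <= maxEnd; end++) {
                long total = 0;
                for (int i = start; i < end; i++) total += sizes.get(i);
                boolean withinRatio = true;
                for (int i = start; i < end; i++) {
                    if (sizes.get(i) > ratio * (total - sizes.get(i))) { withinRatio = false; break; }
                }
                if (withinRatio && end - start > bestLen) {
                    bestStart = start;
                    bestLen = end - start;
                }
            }
        }
        return bestLen == 0 ? null : new int[] { bestStart, bestStart + bestLen };
    }

    public static void main(String[] args) {
        // Three ~5.9 KB store files, mirroring the 18099-byte selection logged above.
        List<Long> sizes = List.of(6033L, 6033L, 6033L);
        int[] pick = select(sizes, 1.2, 10);
        if (pick == null) { System.out.println("nothing to compact"); return; }
        long total = 0;
        for (int i = pick[0]; i < pick[1]; i++) total += sizes.get(i);
        System.out.println("selected files " + pick[0] + ".." + (pick[1] - 1) + ", " + total + " bytes");
    }
}

Running the sketch selects all three files for a combined 18099 bytes, matching the figure reported in the log entry above.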
2024-11-16T12:49:15,899 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/3455d8cafcdd418f842f91846775842d, hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/c9cada6527854d758cf215cc80444404, hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/fc415e1cf8c049f0a2135bc0d8878a2d] into tmpdir=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp, totalSize=17.7 K 2024-11-16T12:49:15,900 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 3455d8cafcdd418f842f91846775842d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731761325657 2024-11-16T12:49:15,900 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting c9cada6527854d758cf215cc80444404, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731761335718 2024-11-16T12:49:15,901 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting fc415e1cf8c049f0a2135bc0d8878a2d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731761345797 2024-11-16T12:49:15,918 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 36b4266bbc74e82fae52780a1b46b758#info#compaction#45 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T12:49:15,919 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp/info/ba51816865774a82935e29bc47d6137c is 1080, key is row0001/info:/1731761325657/Put/seqid=0 2024-11-16T12:49:15,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741841_1017 (size=8296) 2024-11-16T12:49:15,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741841_1017 (size=8296) 2024-11-16T12:49:15,933 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp/info/ba51816865774a82935e29bc47d6137c as hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/ba51816865774a82935e29bc47d6137c 2024-11-16T12:49:15,939 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 36b4266bbc74e82fae52780a1b46b758/info of 36b4266bbc74e82fae52780a1b46b758 into ba51816865774a82935e29bc47d6137c(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T12:49:15,939 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 36b4266bbc74e82fae52780a1b46b758: 2024-11-16T12:49:15,942 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C34235%2C1731761314217.1731761355941 2024-11-16T12:49:15,951 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:15,951 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:15,951 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:15,951 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:15,951 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:15,952 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/WALs/0450ab8807f5,34235,1731761314217/0450ab8807f5%2C34235%2C1731761314217.1731761345799 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/WALs/0450ab8807f5,34235,1731761314217/0450ab8807f5%2C34235%2C1731761314217.1731761355941 2024-11-16T12:49:15,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741839_1015 (size=2520) 2024-11-16T12:49:15,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741839_1015 (size=2520) 2024-11-16T12:49:15,955 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34079:34079),(127.0.0.1/127.0.0.1:36847:36847)] 2024-11-16T12:49:15,963 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/WALs/0450ab8807f5,34235,1731761314217/0450ab8807f5%2C34235%2C1731761314217.1731761314745 to hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/oldWALs/0450ab8807f5%2C34235%2C1731761314217.1731761314745 2024-11-16T12:49:15,964 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T12:49:15,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T12:49:15,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-16T12:49:15,967 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-16T12:49:15,968 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-16T12:49:15,968 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-16T12:49:16,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34235 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-16T12:49:16,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 
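Note: a few entries above, AbstractFSWAL rolled to a new timestamped WAL and WAL-Archive-0 moved the previous file into oldWALs. A minimal sketch of that roll-and-archive pattern, using the local filesystem via java.nio.file instead of HDFS (rollWal and all names/paths here are hypothetical, not HBase APIs):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class WalRollSketch {

    /** Create a new timestamped WAL file, then archive the previous one. */
    static Path rollWal(Path walDir, Path oldWalDir, Path currentWal, String serverName)
            throws IOException {
        Files.createDirectories(walDir);
        Files.createDirectories(oldWalDir);
        // Create the new writer first so there is always a WAL to append to
        // (nanoTime keeps the sketch collision-free when rolling quickly).
        Path newWal = walDir.resolve(serverName + "." + System.nanoTime());
        Files.createFile(newWal);
        // Then archive the old file, mirroring "Archiving ... to .../oldWALs/...".
        if (currentWal != null && Files.exists(currentWal)) {
            Files.move(currentWal, oldWalDir.resolve(currentWal.getFileName()),
                StandardCopyOption.ATOMIC_MOVE);
        }
        return newWal;
    }

    public static void main(String[] args) throws IOException {
        Path base = Files.createTempDirectory("wal-roll-sketch");
        Path walDir = base.resolve("WALs");
        Path oldWalDir = base.resolve("oldWALs");
        Path first = rollWal(walDir, oldWalDir, null, "regionserver");
        Path active = rollWal(walDir, oldWalDir, first, "regionserver");
        try (var archived = Files.list(oldWalDir)) {
            System.out.println("active WAL: " + active + ", archived: " + archived.count());
        }
    }
}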
2024-11-16T12:49:16,121 INFO [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 36b4266bbc74e82fae52780a1b46b758 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T12:49:16,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp/info/847e6ce54c0f44af8d5d7a27ee48b13b is 1080, key is row0000/info:/1731761355940/Put/seqid=0 2024-11-16T12:49:16,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741843_1019 (size=6033) 2024-11-16T12:49:16,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741843_1019 (size=6033) 2024-11-16T12:49:16,135 INFO [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp/info/847e6ce54c0f44af8d5d7a27ee48b13b 2024-11-16T12:49:16,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp/info/847e6ce54c0f44af8d5d7a27ee48b13b as hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/847e6ce54c0f44af8d5d7a27ee48b13b 2024-11-16T12:49:16,149 INFO [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/847e6ce54c0f44af8d5d7a27ee48b13b, entries=1, sequenceid=18, filesize=5.9 K 2024-11-16T12:49:16,150 INFO [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 36b4266bbc74e82fae52780a1b46b758 in 29ms, sequenceid=18, compaction requested=false 2024-11-16T12:49:16,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 36b4266bbc74e82fae52780a1b46b758: 2024-11-16T12:49:16,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 
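Note: the repeated RecoverLeaseFSUtils WARNs above (which resume below) come from probing DistributedFileSystem#isFileClosed reflectively and retrying; because the underlying client is already closed, every probe fails with "Filesystem closed". A minimal sketch of that reflective probe-and-retry pattern, assuming a fake filesystem stand-in (FakeFs, probeFileClosed and the retry numbers are hypothetical, not HBase code):

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class LeaseRecoverySketch {

    /** Stand-in for a filesystem whose client has already been shut down. */
    public static class FakeFs {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    /** Reflectively probe isFileClosed, retrying a few times, as the log shows. */
    static boolean probeFileClosed(Object fs, String path, int attempts, long pauseMs)
            throws InterruptedException {
        Method isFileClosed;
        try {
            isFileClosed = fs.getClass().getMethod("isFileClosed", String.class);
        } catch (NoSuchMethodException e) {
            return false; // method not available on this filesystem implementation
        }
        for (int i = 1; i <= attempts; i++) {
            try {
                return (Boolean) isFileClosed.invoke(fs, path);
            } catch (InvocationTargetException | IllegalAccessException e) {
                // Mirrors "Failed invocation for <path>" followed by the underlying cause.
                System.out.println("attempt " + i + " failed: " + e.getCause());
                Thread.sleep(pauseMs);
            }
        }
        return false;
    }

    public static void main(String[] args) throws InterruptedException {
        boolean closed = probeFileClosed(new FakeFs(), "/WALs/example", 3, 100);
        System.out.println("file reported closed? " + closed);
    }
}

With a closed client the loop can only keep logging and retrying, which is why the same stack trace recurs roughly once per second per WAL file in the entries below.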
2024-11-16T12:49:16,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-16T12:49:16,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-16T12:49:16,155 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-16T12:49:16,155 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 184 msec 2024-11-16T12:49:16,158 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 192 msec 2024-11-16T12:49:16,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:16,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:17,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:17,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:18,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:18,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:19,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:19,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:20,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:20,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:20,901 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 36b4266bbc74e82fae52780a1b46b758, had cached 0 bytes from a total of 14329 2024-11-16T12:49:21,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:21,587 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:22,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:22,587 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:23,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:23,588 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:24,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:24,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:25,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:25,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:25,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36231 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-16T12:49:25,995 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-16T12:49:25,999 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C34235%2C1731761314217.1731761365999 2024-11-16T12:49:26,007 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,007 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,007 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,007 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,007 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,008 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/WALs/0450ab8807f5,34235,1731761314217/0450ab8807f5%2C34235%2C1731761314217.1731761355941 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/WALs/0450ab8807f5,34235,1731761314217/0450ab8807f5%2C34235%2C1731761314217.1731761365999 2024-11-16T12:49:26,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741842_1018 (size=2026) 2024-11-16T12:49:26,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741842_1018 (size=2026) 2024-11-16T12:49:26,010 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/WALs/0450ab8807f5,34235,1731761314217/0450ab8807f5%2C34235%2C1731761314217.1731761345799 to hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/oldWALs/0450ab8807f5%2C34235%2C1731761314217.1731761345799 2024-11-16T12:49:26,027 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34079:34079),(127.0.0.1/127.0.0.1:36847:36847)] 2024-11-16T12:49:26,027 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T12:49:26,027 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T12:49:26,027 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:49:26,027 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:49:26,027 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:49:26,027 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-16T12:49:26,027 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T12:49:26,027 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=835999515, stopped=false 2024-11-16T12:49:26,027 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0450ab8807f5,36231,1731761314069 2024-11-16T12:49:26,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T12:49:26,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T12:49:26,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:49:26,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:49:26,063 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T12:49:26,064 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T12:49:26,064 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:49:26,064 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:49:26,064 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0450ab8807f5,34235,1731761314217' ***** 2024-11-16T12:49:26,064 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T12:49:26,064 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T12:49:26,065 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T12:49:26,065 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:49:26,065 INFO [RS:0;0450ab8807f5:34235 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T12:49:26,065 INFO [RS:0;0450ab8807f5:34235 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T12:49:26,065 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.HRegionServer(3091): Received CLOSE for 36b4266bbc74e82fae52780a1b46b758 2024-11-16T12:49:26,065 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:49:26,065 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.HRegionServer(959): stopping server 0450ab8807f5,34235,1731761314217 2024-11-16T12:49:26,065 INFO [RS:0;0450ab8807f5:34235 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T12:49:26,065 INFO [RS:0;0450ab8807f5:34235 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0450ab8807f5:34235. 
2024-11-16T12:49:26,065 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 36b4266bbc74e82fae52780a1b46b758, disabling compactions & flushes 2024-11-16T12:49:26,065 DEBUG [RS:0;0450ab8807f5:34235 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:49:26,066 DEBUG [RS:0;0450ab8807f5:34235 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:49:26,066 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 2024-11-16T12:49:26,066 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 2024-11-16T12:49:26,066 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T12:49:26,066 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T12:49:26,066 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. after waiting 0 ms 2024-11-16T12:49:26,066 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T12:49:26,066 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 
2024-11-16T12:49:26,066 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T12:49:26,066 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 36b4266bbc74e82fae52780a1b46b758 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T12:49:26,066 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T12:49:26,066 DEBUG [RS:0;0450ab8807f5:34235 {}] regionserver.HRegionServer(1325): Online Regions={36b4266bbc74e82fae52780a1b46b758=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758., 1588230740=hbase:meta,,1.1588230740} 2024-11-16T12:49:26,066 DEBUG [RS:0;0450ab8807f5:34235 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 36b4266bbc74e82fae52780a1b46b758 2024-11-16T12:49:26,066 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T12:49:26,066 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T12:49:26,066 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T12:49:26,066 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T12:49:26,066 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T12:49:26,066 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-16T12:49:26,072 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp/info/0e1924e1635c4a14a18030debfc14cbf is 1080, key is row0001/info:/1731761365997/Put/seqid=0 2024-11-16T12:49:26,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741845_1021 (size=6033) 2024-11-16T12:49:26,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741845_1021 (size=6033) 2024-11-16T12:49:26,077 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp/info/0e1924e1635c4a14a18030debfc14cbf 2024-11-16T12:49:26,085 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740/.tmp/info/7c08f0a905864ee1837ef1888e5bfb13 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758./info:regioninfo/1731761315919/Put/seqid=0 2024-11-16T12:49:26,087 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/.tmp/info/0e1924e1635c4a14a18030debfc14cbf as hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/0e1924e1635c4a14a18030debfc14cbf 2024-11-16T12:49:26,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741846_1022 (size=7308) 2024-11-16T12:49:26,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741846_1022 (size=7308) 2024-11-16T12:49:26,091 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740/.tmp/info/7c08f0a905864ee1837ef1888e5bfb13 2024-11-16T12:49:26,093 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/0e1924e1635c4a14a18030debfc14cbf, entries=1, sequenceid=22, filesize=5.9 K 2024-11-16T12:49:26,094 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 36b4266bbc74e82fae52780a1b46b758 in 28ms, sequenceid=22, compaction requested=true 2024-11-16T12:49:26,094 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/3455d8cafcdd418f842f91846775842d, hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/c9cada6527854d758cf215cc80444404, hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/fc415e1cf8c049f0a2135bc0d8878a2d] to archive 2024-11-16T12:49:26,095 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-16T12:49:26,097 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/3455d8cafcdd418f842f91846775842d to hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/3455d8cafcdd418f842f91846775842d 2024-11-16T12:49:26,098 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/c9cada6527854d758cf215cc80444404 to hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/c9cada6527854d758cf215cc80444404 2024-11-16T12:49:26,099 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/fc415e1cf8c049f0a2135bc0d8878a2d to hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/info/fc415e1cf8c049f0a2135bc0d8878a2d 2024-11-16T12:49:26,100 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=0450ab8807f5:36231 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-16T12:49:26,100 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [3455d8cafcdd418f842f91846775842d=6033, c9cada6527854d758cf215cc80444404=6033, fc415e1cf8c049f0a2135bc0d8878a2d=6033] 2024-11-16T12:49:26,104 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/36b4266bbc74e82fae52780a1b46b758/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-16T12:49:26,105 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 2024-11-16T12:49:26,105 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 36b4266bbc74e82fae52780a1b46b758: Waiting for close lock at 1731761366065Running coprocessor pre-close hooks at 1731761366065Disabling compacts and flushes for region at 1731761366065Disabling writes for close at 1731761366066 (+1 ms)Obtaining lock to block concurrent updates at 1731761366066Preparing flush snapshotting stores in 36b4266bbc74e82fae52780a1b46b758 at 1731761366066Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731761366066Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. at 1731761366067 (+1 ms)Flushing 36b4266bbc74e82fae52780a1b46b758/info: creating writer at 1731761366067Flushing 36b4266bbc74e82fae52780a1b46b758/info: appending metadata at 1731761366071 (+4 ms)Flushing 36b4266bbc74e82fae52780a1b46b758/info: closing flushed file at 1731761366071Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16b56f27: reopening flushed file at 1731761366086 (+15 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 36b4266bbc74e82fae52780a1b46b758 in 28ms, sequenceid=22, compaction requested=true at 1731761366094 (+8 ms)Writing region close event to WAL at 1731761366101 (+7 ms)Running coprocessor post-close hooks at 1731761366105 (+4 ms)Closed at 1731761366105 2024-11-16T12:49:26,105 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731761315556.36b4266bbc74e82fae52780a1b46b758. 
2024-11-16T12:49:26,116 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740/.tmp/ns/2e6a5ff9d4ac4c73a7e6e49bd5fce654 is 43, key is default/ns:d/1731761315485/Put/seqid=0 2024-11-16T12:49:26,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741847_1023 (size=5153) 2024-11-16T12:49:26,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741847_1023 (size=5153) 2024-11-16T12:49:26,121 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740/.tmp/ns/2e6a5ff9d4ac4c73a7e6e49bd5fce654 2024-11-16T12:49:26,142 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740/.tmp/table/da696bba72b647f591ce758d102a86c8 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731761315931/Put/seqid=0 2024-11-16T12:49:26,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741848_1024 (size=5508) 2024-11-16T12:49:26,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741848_1024 (size=5508) 2024-11-16T12:49:26,147 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740/.tmp/table/da696bba72b647f591ce758d102a86c8 2024-11-16T12:49:26,153 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740/.tmp/info/7c08f0a905864ee1837ef1888e5bfb13 as hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740/info/7c08f0a905864ee1837ef1888e5bfb13 2024-11-16T12:49:26,159 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740/info/7c08f0a905864ee1837ef1888e5bfb13, entries=10, sequenceid=11, filesize=7.1 K 2024-11-16T12:49:26,160 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740/.tmp/ns/2e6a5ff9d4ac4c73a7e6e49bd5fce654 as hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740/ns/2e6a5ff9d4ac4c73a7e6e49bd5fce654 2024-11-16T12:49:26,166 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740/ns/2e6a5ff9d4ac4c73a7e6e49bd5fce654, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T12:49:26,167 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740/.tmp/table/da696bba72b647f591ce758d102a86c8 as hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740/table/da696bba72b647f591ce758d102a86c8 2024-11-16T12:49:26,173 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740/table/da696bba72b647f591ce758d102a86c8, entries=2, sequenceid=11, filesize=5.4 K 2024-11-16T12:49:26,174 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 108ms, sequenceid=11, compaction requested=false 2024-11-16T12:49:26,179 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T12:49:26,179 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T12:49:26,179 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T12:49:26,180 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731761366066Running coprocessor pre-close hooks at 1731761366066Disabling compacts and flushes for region at 1731761366066Disabling writes for close at 1731761366066Obtaining lock to block concurrent updates at 1731761366066Preparing flush snapshotting stores in 1588230740 at 1731761366066Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731761366067 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731761366068 (+1 ms)Flushing 1588230740/info: creating writer at 1731761366068Flushing 1588230740/info: appending metadata at 1731761366085 (+17 ms)Flushing 1588230740/info: closing flushed file at 1731761366085Flushing 1588230740/ns: creating writer at 1731761366096 (+11 ms)Flushing 1588230740/ns: appending metadata at 1731761366116 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1731761366116Flushing 1588230740/table: creating writer at 1731761366126 (+10 ms)Flushing 1588230740/table: appending metadata at 1731761366141 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731761366141Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42417b77: reopening flushed file at 1731761366152 (+11 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b23ada4: reopening flushed file at 1731761366159 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@476f9078: reopening flushed file at 1731761366166 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 108ms, sequenceid=11, compaction requested=false at 1731761366174 (+8 ms)Writing region close event to WAL at 1731761366176 (+2 ms)Running coprocessor post-close hooks at 1731761366179 (+3 ms)Closed at 1731761366179 2024-11-16T12:49:26,180 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T12:49:26,266 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.HRegionServer(976): stopping server 0450ab8807f5,34235,1731761314217; all regions closed. 2024-11-16T12:49:26,267 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,267 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,267 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,267 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,267 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741834_1010 (size=3306) 2024-11-16T12:49:26,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741834_1010 (size=3306) 2024-11-16T12:49:26,272 DEBUG [RS:0;0450ab8807f5:34235 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/oldWALs 2024-11-16T12:49:26,272 INFO [RS:0;0450ab8807f5:34235 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0450ab8807f5%2C34235%2C1731761314217.meta:.meta(num 1731761315388) 2024-11-16T12:49:26,273 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,273 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,273 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,273 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,273 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741844_1020 (size=1252) 2024-11-16T12:49:26,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741844_1020 (size=1252) 2024-11-16T12:49:26,280 DEBUG [RS:0;0450ab8807f5:34235 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/oldWALs 2024-11-16T12:49:26,280 INFO [RS:0;0450ab8807f5:34235 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0450ab8807f5%2C34235%2C1731761314217:(num 1731761365999) 2024-11-16T12:49:26,280 DEBUG [RS:0;0450ab8807f5:34235 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:49:26,280 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T12:49:26,280 INFO [RS:0;0450ab8807f5:34235 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T12:49:26,280 INFO [RS:0;0450ab8807f5:34235 {}] hbase.ChoreService(370): Chore service for: regionserver/0450ab8807f5:0 had 
[ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T12:49:26,280 INFO [RS:0;0450ab8807f5:34235 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T12:49:26,280 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T12:49:26,281 INFO [RS:0;0450ab8807f5:34235 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34235 2024-11-16T12:49:26,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:26,408 INFO [RS:0;0450ab8807f5:34235 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T12:49:26,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0450ab8807f5,34235,1731761314217 2024-11-16T12:49:26,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T12:49:26,456 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0450ab8807f5,34235,1731761314217] 2024-11-16T12:49:26,463 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0450ab8807f5,34235,1731761314217 already deleted, retry=false 2024-11-16T12:49:26,463 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0450ab8807f5,34235,1731761314217 expired; onlineServers=0 2024-11-16T12:49:26,463 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0450ab8807f5,36231,1731761314069' ***** 2024-11-16T12:49:26,463 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T12:49:26,463 INFO [M:0;0450ab8807f5:36231 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T12:49:26,463 INFO [M:0;0450ab8807f5:36231 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T12:49:26,463 DEBUG [M:0;0450ab8807f5:36231 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T12:49:26,463 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-16T12:49:26,463 DEBUG [M:0;0450ab8807f5:36231 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T12:49:26,463 DEBUG [master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761314571 {}] cleaner.HFileCleaner(306): Exit Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761314571,5,FailOnTimeoutGroup] 2024-11-16T12:49:26,464 DEBUG [master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761314571 {}] cleaner.HFileCleaner(306): Exit Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761314571,5,FailOnTimeoutGroup] 2024-11-16T12:49:26,464 INFO [M:0;0450ab8807f5:36231 {}] hbase.ChoreService(370): Chore service for: master/0450ab8807f5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T12:49:26,464 INFO [M:0;0450ab8807f5:36231 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T12:49:26,464 DEBUG [M:0;0450ab8807f5:36231 {}] master.HMaster(1795): Stopping service threads 2024-11-16T12:49:26,464 INFO [M:0;0450ab8807f5:36231 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T12:49:26,464 INFO [M:0;0450ab8807f5:36231 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T12:49:26,464 INFO [M:0;0450ab8807f5:36231 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T12:49:26,465 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-16T12:49:26,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T12:49:26,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:49:26,471 DEBUG [M:0;0450ab8807f5:36231 {}] zookeeper.ZKUtil(347): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T12:49:26,472 WARN [M:0;0450ab8807f5:36231 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T12:49:26,472 INFO [M:0;0450ab8807f5:36231 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/.lastflushedseqids 2024-11-16T12:49:26,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741849_1025 (size=130) 2024-11-16T12:49:26,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741849_1025 (size=130) 2024-11-16T12:49:26,478 INFO [M:0;0450ab8807f5:36231 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T12:49:26,478 INFO [M:0;0450ab8807f5:36231 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T12:49:26,478 DEBUG [M:0;0450ab8807f5:36231 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T12:49:26,478 INFO [M:0;0450ab8807f5:36231 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:49:26,478 DEBUG [M:0;0450ab8807f5:36231 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:49:26,478 DEBUG [M:0;0450ab8807f5:36231 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T12:49:26,478 DEBUG [M:0;0450ab8807f5:36231 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T12:49:26,479 INFO [M:0;0450ab8807f5:36231 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.59 KB heapSize=55 KB 2024-11-16T12:49:26,496 DEBUG [M:0;0450ab8807f5:36231 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a4e907f771e94fc8a98e4b5808a8cdd6 is 82, key is hbase:meta,,1/info:regioninfo/1731761315418/Put/seqid=0 2024-11-16T12:49:26,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741850_1026 (size=5672) 2024-11-16T12:49:26,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741850_1026 (size=5672) 2024-11-16T12:49:26,501 INFO [M:0;0450ab8807f5:36231 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a4e907f771e94fc8a98e4b5808a8cdd6 2024-11-16T12:49:26,521 DEBUG [M:0;0450ab8807f5:36231 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/de4043a66ece494492451b7e5e87e64d is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731761315936/Put/seqid=0 2024-11-16T12:49:26,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741851_1027 (size=7823) 2024-11-16T12:49:26,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741851_1027 (size=7823) 2024-11-16T12:49:26,526 INFO [M:0;0450ab8807f5:36231 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.99 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/de4043a66ece494492451b7e5e87e64d 2024-11-16T12:49:26,530 INFO [M:0;0450ab8807f5:36231 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for de4043a66ece494492451b7e5e87e64d 2024-11-16T12:49:26,545 DEBUG [M:0;0450ab8807f5:36231 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/44fc462d15854e9d987dc342d608b7c7 is 69, key is 0450ab8807f5,34235,1731761314217/rs:state/1731761314589/Put/seqid=0 2024-11-16T12:49:26,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741852_1028 (size=5156) 2024-11-16T12:49:26,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741852_1028 (size=5156) 2024-11-16T12:49:26,550 INFO [M:0;0450ab8807f5:36231 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), 
to=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/44fc462d15854e9d987dc342d608b7c7 2024-11-16T12:49:26,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:49:26,556 INFO [RS:0;0450ab8807f5:34235 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T12:49:26,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34235-0x10144fa2bc70001, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:49:26,556 INFO [RS:0;0450ab8807f5:34235 {}] regionserver.HRegionServer(1031): Exiting; stopping=0450ab8807f5,34235,1731761314217; zookeeper connection closed. 2024-11-16T12:49:26,556 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@cba3b6b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@cba3b6b 2024-11-16T12:49:26,557 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T12:49:26,574 DEBUG [M:0;0450ab8807f5:36231 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4916a022744d413d919180096cc99ffe is 52, key is load_balancer_on/state:d/1731761315551/Put/seqid=0 2024-11-16T12:49:26,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741853_1029 (size=5056) 2024-11-16T12:49:26,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741853_1029 (size=5056) 2024-11-16T12:49:26,579 INFO [M:0;0450ab8807f5:36231 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4916a022744d413d919180096cc99ffe 2024-11-16T12:49:26,585 DEBUG [M:0;0450ab8807f5:36231 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a4e907f771e94fc8a98e4b5808a8cdd6 as hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a4e907f771e94fc8a98e4b5808a8cdd6 2024-11-16T12:49:26,590 INFO [M:0;0450ab8807f5:36231 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a4e907f771e94fc8a98e4b5808a8cdd6, entries=8, sequenceid=121, filesize=5.5 K 2024-11-16T12:49:26,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:26,591 DEBUG [M:0;0450ab8807f5:36231 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/de4043a66ece494492451b7e5e87e64d as hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/de4043a66ece494492451b7e5e87e64d 2024-11-16T12:49:26,596 INFO [M:0;0450ab8807f5:36231 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for de4043a66ece494492451b7e5e87e64d 2024-11-16T12:49:26,596 INFO [M:0;0450ab8807f5:36231 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/de4043a66ece494492451b7e5e87e64d, entries=14, sequenceid=121, filesize=7.6 K 2024-11-16T12:49:26,597 DEBUG [M:0;0450ab8807f5:36231 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/44fc462d15854e9d987dc342d608b7c7 as hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/44fc462d15854e9d987dc342d608b7c7 2024-11-16T12:49:26,603 INFO [M:0;0450ab8807f5:36231 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/44fc462d15854e9d987dc342d608b7c7, entries=1, sequenceid=121, filesize=5.0 K 2024-11-16T12:49:26,605 DEBUG [M:0;0450ab8807f5:36231 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4916a022744d413d919180096cc99ffe as hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4916a022744d413d919180096cc99ffe 2024-11-16T12:49:26,611 INFO [M:0;0450ab8807f5:36231 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33259/user/jenkins/test-data/351ed801-9e52-b7e4-1b41-aeb339e000b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4916a022744d413d919180096cc99ffe, entries=1, sequenceid=121, filesize=4.9 K 2024-11-16T12:49:26,612 INFO [M:0;0450ab8807f5:36231 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 134ms, sequenceid=121, compaction requested=false 2024-11-16T12:49:26,612 INFO [regionserver/0450ab8807f5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T12:49:26,613 INFO [M:0;0450ab8807f5:36231 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T12:49:26,613 DEBUG [M:0;0450ab8807f5:36231 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731761366478Disabling compacts and flushes for region at 1731761366478Disabling writes for close at 1731761366478Obtaining lock to block concurrent updates at 1731761366479 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731761366479Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44638, getHeapSize=56256, getOffHeapSize=0, getCellsCount=140 at 1731761366479Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731761366480 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731761366480Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731761366496 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731761366496Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731761366506 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731761366521 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731761366521Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731761366530 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731761366544 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731761366545 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731761366555 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731761366574 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731761366574Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c796834: reopening flushed file at 1731761366584 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@774e9887: reopening flushed file at 1731761366590 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f7c5600: reopening flushed file at 1731761366596 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c71f1c9: reopening flushed file at 1731761366604 (+8 ms)Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 134ms, sequenceid=121, compaction requested=false at 1731761366612 (+8 ms)Writing region close event to WAL at 1731761366613 (+1 ms)Closed at 1731761366613 2024-11-16T12:49:26,614 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,614 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,614 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,614 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,614 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:49:26,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33883 is added to blk_1073741830_1006 (size=53035) 2024-11-16T12:49:26,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741830_1006 (size=53035) 2024-11-16T12:49:26,617 INFO [M:0;0450ab8807f5:36231 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-16T12:49:26,617 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T12:49:26,617 INFO [M:0;0450ab8807f5:36231 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36231 2024-11-16T12:49:26,617 INFO [M:0;0450ab8807f5:36231 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T12:49:26,735 INFO [M:0;0450ab8807f5:36231 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T12:49:26,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:49:26,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36231-0x10144fa2bc70000, quorum=127.0.0.1:64390, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:49:26,738 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@339093f7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:49:26,738 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@720d391f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:49:26,738 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:49:26,738 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77b33594{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:49:26,738 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69ea5868{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/hadoop.log.dir/,STOPPED} 2024-11-16T12:49:26,740 WARN [BP-1518043770-172.17.0.2-1731761312484 heartbeating to localhost/127.0.0.1:33259 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:49:26,740 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T12:49:26,740 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:49:26,740 WARN [BP-1518043770-172.17.0.2-1731761312484 heartbeating to localhost/127.0.0.1:33259 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1518043770-172.17.0.2-1731761312484 (Datanode Uuid 46937fe2-e34e-46a3-b22f-f4bc216f0bb0) service to localhost/127.0.0.1:33259 2024-11-16T12:49:26,741 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/cluster_271b2c1e-f235-9793-e49e-e20dc34e4459/data/data3/current/BP-1518043770-172.17.0.2-1731761312484 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:49:26,741 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/cluster_271b2c1e-f235-9793-e49e-e20dc34e4459/data/data4/current/BP-1518043770-172.17.0.2-1731761312484 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:49:26,741 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:49:26,743 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5c7d522f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:49:26,743 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@14a6d451{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:49:26,743 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:49:26,744 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@448bc78{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:49:26,744 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@757e4aa2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/hadoop.log.dir/,STOPPED} 2024-11-16T12:49:26,745 WARN [BP-1518043770-172.17.0.2-1731761312484 heartbeating to localhost/127.0.0.1:33259 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:49:26,745 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T12:49:26,745 WARN [BP-1518043770-172.17.0.2-1731761312484 heartbeating to localhost/127.0.0.1:33259 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1518043770-172.17.0.2-1731761312484 (Datanode Uuid 6857ad7e-8171-45ee-a58a-6b344fbd5877) service to localhost/127.0.0.1:33259 2024-11-16T12:49:26,745 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:49:26,746 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/cluster_271b2c1e-f235-9793-e49e-e20dc34e4459/data/data1/current/BP-1518043770-172.17.0.2-1731761312484 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:49:26,746 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/cluster_271b2c1e-f235-9793-e49e-e20dc34e4459/data/data2/current/BP-1518043770-172.17.0.2-1731761312484 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:49:26,746 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:49:26,753 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@61fcc471{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T12:49:26,753 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@74b5ebca{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:49:26,754 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:49:26,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@438a440e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:49:26,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72ef9fa2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/hadoop.log.dir/,STOPPED} 2024-11-16T12:49:26,760 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T12:49:26,777 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T12:49:26,784 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=208 (was 182) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33259 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:33259 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33259 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33259 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33259 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33259 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) 
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33259 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33259 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33259 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? 
-, OpenFileDescriptor=486 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=229 (was 230), ProcessCount=11 (was 11), AvailableMemoryMB=2842 (was 3056) 2024-11-16T12:49:26,792 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=208, OpenFileDescriptor=486, MaxFileDescriptor=1048576, SystemLoadAverage=229, ProcessCount=11, AvailableMemoryMB=2842 2024-11-16T12:49:26,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T12:49:26,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/hadoop.log.dir so I do NOT create it in target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6 2024-11-16T12:49:26,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f870c0f7-8ea4-66b3-3016-e319c3ca44d2/hadoop.tmp.dir so I do NOT create it in target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6 2024-11-16T12:49:26,792 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/cluster_f97aeae6-9f63-e107-b4f0-d9a7ad2afc9f, deleteOnExit=true 2024-11-16T12:49:26,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T12:49:26,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/test.cache.data in system properties and HBase conf 2024-11-16T12:49:26,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T12:49:26,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/hadoop.log.dir in system properties and HBase conf 2024-11-16T12:49:26,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T12:49:26,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T12:49:26,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T12:49:26,793 DEBUG [Time-limited test {}] 
fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-16T12:49:26,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T12:49:26,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T12:49:26,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T12:49:26,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T12:49:26,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T12:49:26,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T12:49:26,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T12:49:26,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T12:49:26,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T12:49:26,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/nfs.dump.dir in system properties and HBase conf 2024-11-16T12:49:26,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/java.io.tmpdir in system properties and HBase conf 2024-11-16T12:49:26,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T12:49:26,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T12:49:26,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T12:49:26,808 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T12:49:26,950 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T12:49:26,950 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T12:49:26,951 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T12:49:26,951 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T12:49:27,054 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:49:27,058 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:49:27,059 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:49:27,059 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:49:27,059 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T12:49:27,060 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:49:27,060 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42d8f83b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:49:27,060 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@629cd82f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:49:27,164 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2eb912ab{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/java.io.tmpdir/jetty-localhost-42823-hadoop-hdfs-3_4_1-tests_jar-_-any-3371739444706718171/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T12:49:27,164 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@37b300d0{HTTP/1.1, (http/1.1)}{localhost:42823} 2024-11-16T12:49:27,164 INFO [Time-limited test {}] server.Server(415): Started @246051ms 2024-11-16T12:49:27,176 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T12:49:27,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:27,401 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:49:27,404 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:49:27,405 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:49:27,405 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:49:27,405 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T12:49:27,405 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a5db76d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:49:27,406 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@286b8c80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:49:27,517 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@72925ee1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/java.io.tmpdir/jetty-localhost-42001-hadoop-hdfs-3_4_1-tests_jar-_-any-3434940968326739488/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:49:27,517 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f4aa33e{HTTP/1.1, (http/1.1)}{localhost:42001} 2024-11-16T12:49:27,517 INFO [Time-limited test {}] server.Server(415): Started @246404ms 2024-11-16T12:49:27,519 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:49:27,548 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:49:27,551 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:49:27,552 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:49:27,552 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:49:27,552 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T12:49:27,552 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48b1d1cb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:49:27,552 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5da9c931{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:49:27,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:27,652 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@603e6d9b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/java.io.tmpdir/jetty-localhost-44941-hadoop-hdfs-3_4_1-tests_jar-_-any-14154838092530385945/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:49:27,652 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@58ab8e7a{HTTP/1.1, (http/1.1)}{localhost:44941} 2024-11-16T12:49:27,652 INFO [Time-limited test {}] server.Server(415): Started @246540ms 2024-11-16T12:49:27,653 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:49:28,138 WARN [Thread-1967 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/cluster_f97aeae6-9f63-e107-b4f0-d9a7ad2afc9f/data/data1/current/BP-1746512817-172.17.0.2-1731761366811/current, will proceed with Du for space computation calculation, 2024-11-16T12:49:28,138 WARN [Thread-1968 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/cluster_f97aeae6-9f63-e107-b4f0-d9a7ad2afc9f/data/data2/current/BP-1746512817-172.17.0.2-1731761366811/current, will proceed with Du for space computation calculation, 2024-11-16T12:49:28,165 WARN [Thread-1932 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T12:49:28,167 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd0232364ca47ff61 with lease ID 0xd2f0a5aa081688a0: Processing first storage report for DS-96f75d2c-c4f8-4b43-bde5-995c982b183e from datanode DatanodeRegistration(127.0.0.1:38281, datanodeUuid=1e9107ee-5017-401a-a398-1aa41c5fb377, infoPort=34389, infoSecurePort=0, ipcPort=44159, storageInfo=lv=-57;cid=testClusterID;nsid=394156228;c=1731761366811) 2024-11-16T12:49:28,167 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd0232364ca47ff61 with lease ID 0xd2f0a5aa081688a0: from storage DS-96f75d2c-c4f8-4b43-bde5-995c982b183e node DatanodeRegistration(127.0.0.1:38281, datanodeUuid=1e9107ee-5017-401a-a398-1aa41c5fb377, infoPort=34389, infoSecurePort=0, ipcPort=44159, storageInfo=lv=-57;cid=testClusterID;nsid=394156228;c=1731761366811), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T12:49:28,167 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd0232364ca47ff61 with lease ID 0xd2f0a5aa081688a0: Processing first storage report for DS-23b80a16-42e7-4248-a7fe-4a3f6af51a74 from datanode DatanodeRegistration(127.0.0.1:38281, datanodeUuid=1e9107ee-5017-401a-a398-1aa41c5fb377, infoPort=34389, infoSecurePort=0, ipcPort=44159, storageInfo=lv=-57;cid=testClusterID;nsid=394156228;c=1731761366811) 2024-11-16T12:49:28,168 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd0232364ca47ff61 with lease ID 0xd2f0a5aa081688a0: from storage DS-23b80a16-42e7-4248-a7fe-4a3f6af51a74 node DatanodeRegistration(127.0.0.1:38281, datanodeUuid=1e9107ee-5017-401a-a398-1aa41c5fb377, infoPort=34389, infoSecurePort=0, ipcPort=44159, storageInfo=lv=-57;cid=testClusterID;nsid=394156228;c=1731761366811), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:49:28,327 WARN [Thread-1979 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/cluster_f97aeae6-9f63-e107-b4f0-d9a7ad2afc9f/data/data3/current/BP-1746512817-172.17.0.2-1731761366811/current, will proceed with Du for space computation calculation, 2024-11-16T12:49:28,327 WARN [Thread-1980 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/cluster_f97aeae6-9f63-e107-b4f0-d9a7ad2afc9f/data/data4/current/BP-1746512817-172.17.0.2-1731761366811/current, will proceed with Du for space computation calculation, 2024-11-16T12:49:28,347 WARN [Thread-1955 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T12:49:28,348 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbb167b6697c8ac9d with lease ID 0xd2f0a5aa081688a1: Processing first storage report for DS-7b62f5d0-dfaf-4969-8257-43d7316ecb0b from datanode DatanodeRegistration(127.0.0.1:42515, datanodeUuid=984e258d-2436-4997-8aea-d0238996180a, infoPort=33791, infoSecurePort=0, ipcPort=44095, storageInfo=lv=-57;cid=testClusterID;nsid=394156228;c=1731761366811) 2024-11-16T12:49:28,349 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbb167b6697c8ac9d with lease ID 0xd2f0a5aa081688a1: from storage DS-7b62f5d0-dfaf-4969-8257-43d7316ecb0b node DatanodeRegistration(127.0.0.1:42515, datanodeUuid=984e258d-2436-4997-8aea-d0238996180a, infoPort=33791, infoSecurePort=0, ipcPort=44095, storageInfo=lv=-57;cid=testClusterID;nsid=394156228;c=1731761366811), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:49:28,349 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbb167b6697c8ac9d with lease ID 0xd2f0a5aa081688a1: Processing first storage report for DS-0e0d8e36-8d9e-40a6-9c95-a2e0b42b76f7 from datanode DatanodeRegistration(127.0.0.1:42515, datanodeUuid=984e258d-2436-4997-8aea-d0238996180a, infoPort=33791, infoSecurePort=0, ipcPort=44095, storageInfo=lv=-57;cid=testClusterID;nsid=394156228;c=1731761366811) 2024-11-16T12:49:28,349 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbb167b6697c8ac9d with lease ID 0xd2f0a5aa081688a1: from storage DS-0e0d8e36-8d9e-40a6-9c95-a2e0b42b76f7 node DatanodeRegistration(127.0.0.1:42515, datanodeUuid=984e258d-2436-4997-8aea-d0238996180a, infoPort=33791, infoSecurePort=0, ipcPort=44095, storageInfo=lv=-57;cid=testClusterID;nsid=394156228;c=1731761366811), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:49:28,386 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6 2024-11-16T12:49:28,389 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/cluster_f97aeae6-9f63-e107-b4f0-d9a7ad2afc9f/zookeeper_0, clientPort=60221, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/cluster_f97aeae6-9f63-e107-b4f0-d9a7ad2afc9f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/cluster_f97aeae6-9f63-e107-b4f0-d9a7ad2afc9f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T12:49:28,391 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60221 2024-11-16T12:49:28,391 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:49:28,393 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:49:28,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:28,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741825_1001 (size=7) 2024-11-16T12:49:28,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741825_1001 (size=7) 2024-11-16T12:49:28,404 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0 with version=8 2024-11-16T12:49:28,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/hbase-staging 2024-11-16T12:49:28,406 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0450ab8807f5:0 server-side Connection retries=45 2024-11-16T12:49:28,406 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:49:28,406 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T12:49:28,407 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T12:49:28,407 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:49:28,407 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T12:49:28,407 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T12:49:28,407 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T12:49:28,407 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46653 2024-11-16T12:49:28,410 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46653 connecting to ZooKeeper ensemble=127.0.0.1:60221 2024-11-16T12:49:28,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:466530x0, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T12:49:28,459 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46653-0x10144fb00090000 connected 2024-11-16T12:49:28,521 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:49:28,523 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:49:28,526 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:49:28,526 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0, hbase.cluster.distributed=false 2024-11-16T12:49:28,527 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T12:49:28,528 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46653 2024-11-16T12:49:28,528 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46653 2024-11-16T12:49:28,528 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46653 2024-11-16T12:49:28,529 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46653 2024-11-16T12:49:28,529 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46653 2024-11-16T12:49:28,546 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0450ab8807f5:0 server-side Connection retries=45 2024-11-16T12:49:28,546 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:49:28,546 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T12:49:28,546 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T12:49:28,546 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:49:28,546 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T12:49:28,546 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T12:49:28,546 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T12:49:28,547 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33541 2024-11-16T12:49:28,548 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33541 connecting to ZooKeeper ensemble=127.0.0.1:60221 2024-11-16T12:49:28,549 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
2024-11-16T12:49:28,550 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:49:28,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:335410x0, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T12:49:28,563 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33541-0x10144fb00090001 connected 2024-11-16T12:49:28,563 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:49:28,564 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T12:49:28,564 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T12:49:28,565 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T12:49:28,566 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T12:49:28,567 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33541 2024-11-16T12:49:28,567 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33541 2024-11-16T12:49:28,567 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33541 2024-11-16T12:49:28,568 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33541 2024-11-16T12:49:28,568 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33541 2024-11-16T12:49:28,581 DEBUG [M:0;0450ab8807f5:46653 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0450ab8807f5:46653 2024-11-16T12:49:28,581 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0450ab8807f5,46653,1731761368406 2024-11-16T12:49:28,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:49:28,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:49:28,588 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0450ab8807f5,46653,1731761368406 2024-11-16T12:49:28,591 WARN [Close-WAL-Writer-0 {}] 
util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:28,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T12:49:28,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:49:28,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:49:28,597 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T12:49:28,597 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0450ab8807f5,46653,1731761368406 from backup master directory 2024-11-16T12:49:28,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0450ab8807f5,46653,1731761368406 2024-11-16T12:49:28,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:49:28,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:49:28,604 WARN [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T12:49:28,604 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0450ab8807f5,46653,1731761368406 2024-11-16T12:49:28,609 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/hbase.id] with ID: 65d98722-18b6-493a-8adb-6bf3a88a4f49 2024-11-16T12:49:28,609 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/.tmp/hbase.id 2024-11-16T12:49:28,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741826_1002 (size=42) 2024-11-16T12:49:28,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741826_1002 (size=42) 2024-11-16T12:49:28,618 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/.tmp/hbase.id]:[hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/hbase.id] 2024-11-16T12:49:28,629 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:49:28,629 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T12:49:28,630 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-16T12:49:28,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:49:28,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:49:28,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741827_1003 (size=196) 2024-11-16T12:49:28,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741827_1003 (size=196) 2024-11-16T12:49:28,657 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T12:49:28,658 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T12:49:28,658 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:49:28,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741828_1004 (size=1189) 2024-11-16T12:49:28,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741828_1004 (size=1189) 2024-11-16T12:49:28,666 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store 2024-11-16T12:49:28,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741829_1005 (size=34) 2024-11-16T12:49:28,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741829_1005 (size=34) 2024-11-16T12:49:28,672 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:49:28,672 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T12:49:28,672 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:49:28,672 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:49:28,672 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T12:49:28,672 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:49:28,672 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T12:49:28,672 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731761368672Disabling compacts and flushes for region at 1731761368672Disabling writes for close at 1731761368672Writing region close event to WAL at 1731761368672Closed at 1731761368672 2024-11-16T12:49:28,673 WARN [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/.initializing 2024-11-16T12:49:28,673 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/WALs/0450ab8807f5,46653,1731761368406 2024-11-16T12:49:28,675 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C46653%2C1731761368406, suffix=, logDir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/WALs/0450ab8807f5,46653,1731761368406, archiveDir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/oldWALs, maxLogs=10 2024-11-16T12:49:28,676 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C46653%2C1731761368406.1731761368675 2024-11-16T12:49:28,680 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/WALs/0450ab8807f5,46653,1731761368406/0450ab8807f5%2C46653%2C1731761368406.1731761368675 2024-11-16T12:49:28,681 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33791:33791),(127.0.0.1/127.0.0.1:34389:34389)] 2024-11-16T12:49:28,681 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:49:28,681 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:49:28,682 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:49:28,682 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:49:28,683 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:49:28,684 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T12:49:28,684 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:28,684 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:49:28,685 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:49:28,686 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T12:49:28,686 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:28,686 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:49:28,686 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:49:28,687 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T12:49:28,687 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:28,688 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:49:28,688 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:49:28,689 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T12:49:28,689 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:28,689 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:49:28,689 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:49:28,690 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:49:28,690 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:49:28,692 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:49:28,692 DEBUG [master/0450ab8807f5:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:49:28,692 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T12:49:28,693 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:49:28,696 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:49:28,696 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=881066, jitterRate=0.12033344805240631}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T12:49:28,697 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731761368682Initializing all the Stores at 1731761368682Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761368682Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761368683 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761368683Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761368683Cleaning up temporary data from old regions at 1731761368692 (+9 ms)Region opened successfully at 1731761368697 (+5 ms) 2024-11-16T12:49:28,697 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T12:49:28,701 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@385571f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0450ab8807f5/172.17.0.2:0 2024-11-16T12:49:28,702 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T12:49:28,702 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T12:49:28,702 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T12:49:28,702 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T12:49:28,702 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T12:49:28,703 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T12:49:28,703 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T12:49:28,705 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T12:49:28,706 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T12:49:28,738 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T12:49:28,738 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T12:49:28,739 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T12:49:28,746 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T12:49:28,747 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T12:49:28,748 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T12:49:28,754 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T12:49:28,755 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T12:49:28,763 DEBUG 
[master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T12:49:28,764 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T12:49:28,771 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T12:49:28,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T12:49:28,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T12:49:28,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:49:28,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:49:28,780 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0450ab8807f5,46653,1731761368406, sessionid=0x10144fb00090000, setting cluster-up flag (Was=false) 2024-11-16T12:49:28,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:49:28,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:49:28,821 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T12:49:28,822 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0450ab8807f5,46653,1731761368406 2024-11-16T12:49:28,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:49:28,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:49:28,863 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T12:49:28,864 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0450ab8807f5,46653,1731761368406 2024-11-16T12:49:28,865 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T12:49:28,866 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T12:49:28,867 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T12:49:28,867 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T12:49:28,867 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0450ab8807f5,46653,1731761368406 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T12:49:28,868 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:49:28,868 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:49:28,868 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:49:28,869 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:49:28,869 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0450ab8807f5:0, corePoolSize=10, maxPoolSize=10 2024-11-16T12:49:28,869 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:49:28,869 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0450ab8807f5:0, corePoolSize=2, maxPoolSize=2 2024-11-16T12:49:28,869 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0450ab8807f5:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T12:49:28,871 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer(746): ClusterId : 65d98722-18b6-493a-8adb-6bf3a88a4f49 2024-11-16T12:49:28,871 DEBUG [RS:0;0450ab8807f5:33541 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T12:49:28,872 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731761398872 2024-11-16T12:49:28,872 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T12:49:28,872 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T12:49:28,872 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T12:49:28,872 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T12:49:28,872 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T12:49:28,872 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T12:49:28,872 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:28,872 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:49:28,873 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T12:49:28,873 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T12:49:28,873 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T12:49:28,873 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T12:49:28,873 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T12:49:28,873 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T12:49:28,873 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761368873,5,FailOnTimeoutGroup] 2024-11-16T12:49:28,873 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761368873,5,FailOnTimeoutGroup] 2024-11-16T12:49:28,873 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore 
name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:28,873 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T12:49:28,874 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:28,874 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:28,874 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:28,874 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T12:49:28,880 DEBUG [RS:0;0450ab8807f5:33541 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T12:49:28,880 DEBUG [RS:0;0450ab8807f5:33541 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T12:49:28,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741831_1007 (size=1321) 2024-11-16T12:49:28,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741831_1007 (size=1321) 2024-11-16T12:49:28,882 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T12:49:28,882 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 
'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0 2024-11-16T12:49:28,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741832_1008 (size=32) 2024-11-16T12:49:28,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741832_1008 (size=32) 2024-11-16T12:49:28,889 DEBUG [RS:0;0450ab8807f5:33541 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T12:49:28,889 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:49:28,889 DEBUG [RS:0;0450ab8807f5:33541 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23b2afa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0450ab8807f5/172.17.0.2:0 2024-11-16T12:49:28,890 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T12:49:28,891 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T12:49:28,891 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:28,892 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:49:28,892 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T12:49:28,893 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T12:49:28,893 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:28,894 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:49:28,894 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T12:49:28,895 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T12:49:28,895 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:28,895 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:49:28,895 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T12:49:28,896 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T12:49:28,896 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:28,897 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:49:28,897 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T12:49:28,897 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740 2024-11-16T12:49:28,897 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740 2024-11-16T12:49:28,899 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T12:49:28,899 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T12:49:28,899 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-16T12:49:28,900 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T12:49:28,902 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:49:28,902 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766942, jitterRate=-0.024783164262771606}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T12:49:28,903 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731761368889Initializing all the Stores at 1731761368890 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761368890Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761368890Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761368890Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761368890Cleaning up temporary data from old regions at 1731761368899 (+9 ms)Region opened successfully at 1731761368903 (+4 ms) 2024-11-16T12:49:28,903 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T12:49:28,903 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T12:49:28,903 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T12:49:28,903 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T12:49:28,903 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T12:49:28,904 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T12:49:28,904 DEBUG [RS:0;0450ab8807f5:33541 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0450ab8807f5:33541 2024-11-16T12:49:28,904 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: 
Waiting for close lock at 1731761368903Disabling compacts and flushes for region at 1731761368903Disabling writes for close at 1731761368903Writing region close event to WAL at 1731761368904 (+1 ms)Closed at 1731761368904 2024-11-16T12:49:28,904 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T12:49:28,904 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T12:49:28,904 DEBUG [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T12:49:28,905 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer(2659): reportForDuty to master=0450ab8807f5,46653,1731761368406 with port=33541, startcode=1731761368545 2024-11-16T12:49:28,905 DEBUG [RS:0;0450ab8807f5:33541 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T12:49:28,905 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:49:28,905 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T12:49:28,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T12:49:28,907 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44419, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T12:49:28,907 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46653 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0450ab8807f5,33541,1731761368545 2024-11-16T12:49:28,907 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46653 {}] master.ServerManager(517): Registering regionserver=0450ab8807f5,33541,1731761368545 2024-11-16T12:49:28,907 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T12:49:28,908 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T12:49:28,909 DEBUG [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0 2024-11-16T12:49:28,909 DEBUG [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39017 2024-11-16T12:49:28,909 DEBUG [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T12:49:28,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T12:49:28,918 DEBUG [RS:0;0450ab8807f5:33541 {}] 
zookeeper.ZKUtil(111): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0450ab8807f5,33541,1731761368545 2024-11-16T12:49:28,919 WARN [RS:0;0450ab8807f5:33541 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T12:49:28,919 INFO [RS:0;0450ab8807f5:33541 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:49:28,919 DEBUG [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/WALs/0450ab8807f5,33541,1731761368545 2024-11-16T12:49:28,919 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0450ab8807f5,33541,1731761368545] 2024-11-16T12:49:28,922 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T12:49:28,924 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T12:49:28,925 INFO [RS:0;0450ab8807f5:33541 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T12:49:28,925 INFO [RS:0;0450ab8807f5:33541 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:28,925 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T12:49:28,926 INFO [RS:0;0450ab8807f5:33541 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T12:49:28,926 INFO [RS:0;0450ab8807f5:33541 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
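
For reference on the MemStoreFlusher bounds logged above (globalMemStoreLimit=880 M, low-water mark 836 M, i.e. a 0.95 ratio): these limits are derived from the region server heap and the standard global-memstore settings. A minimal, illustrative sketch of supplying those settings through a Hadoop Configuration follows; the key names are the stock HBase ones and the values shown are the defaults, not values read from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap shared by all memstores (default 0.4).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Flushing is forced until usage drops below this fraction of the upper
        // limit (default 0.95), matching the 836 M / 880 M ratio logged above.
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
      }
    }
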
2024-11-16T12:49:28,926 DEBUG [RS:0;0450ab8807f5:33541 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:49:28,926 DEBUG [RS:0;0450ab8807f5:33541 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:49:28,926 DEBUG [RS:0;0450ab8807f5:33541 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:49:28,926 DEBUG [RS:0;0450ab8807f5:33541 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:49:28,926 DEBUG [RS:0;0450ab8807f5:33541 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:49:28,926 DEBUG [RS:0;0450ab8807f5:33541 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0450ab8807f5:0, corePoolSize=2, maxPoolSize=2 2024-11-16T12:49:28,927 DEBUG [RS:0;0450ab8807f5:33541 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:49:28,927 DEBUG [RS:0;0450ab8807f5:33541 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:49:28,927 DEBUG [RS:0;0450ab8807f5:33541 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:49:28,927 DEBUG [RS:0;0450ab8807f5:33541 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:49:28,927 DEBUG [RS:0;0450ab8807f5:33541 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:49:28,927 DEBUG [RS:0;0450ab8807f5:33541 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:49:28,927 DEBUG [RS:0;0450ab8807f5:33541 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0450ab8807f5:0, corePoolSize=3, maxPoolSize=3 2024-11-16T12:49:28,927 DEBUG [RS:0;0450ab8807f5:33541 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0, corePoolSize=3, maxPoolSize=3 2024-11-16T12:49:28,929 INFO [RS:0;0450ab8807f5:33541 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:28,929 INFO [RS:0;0450ab8807f5:33541 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:28,929 INFO [RS:0;0450ab8807f5:33541 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:28,929 INFO [RS:0;0450ab8807f5:33541 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-16T12:49:28,929 INFO [RS:0;0450ab8807f5:33541 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:28,929 INFO [RS:0;0450ab8807f5:33541 {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,33541,1731761368545-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T12:49:28,943 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T12:49:28,944 INFO [RS:0;0450ab8807f5:33541 {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,33541,1731761368545-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:28,944 INFO [RS:0;0450ab8807f5:33541 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:28,944 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.Replication(171): 0450ab8807f5,33541,1731761368545 started 2024-11-16T12:49:28,957 INFO [RS:0;0450ab8807f5:33541 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:28,957 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer(1482): Serving as 0450ab8807f5,33541,1731761368545, RpcServer on 0450ab8807f5/172.17.0.2:33541, sessionid=0x10144fb00090001 2024-11-16T12:49:28,958 DEBUG [RS:0;0450ab8807f5:33541 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T12:49:28,958 DEBUG [RS:0;0450ab8807f5:33541 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0450ab8807f5,33541,1731761368545 2024-11-16T12:49:28,958 DEBUG [RS:0;0450ab8807f5:33541 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0450ab8807f5,33541,1731761368545' 2024-11-16T12:49:28,958 DEBUG [RS:0;0450ab8807f5:33541 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T12:49:28,958 DEBUG [RS:0;0450ab8807f5:33541 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T12:49:28,959 DEBUG [RS:0;0450ab8807f5:33541 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T12:49:28,959 DEBUG [RS:0;0450ab8807f5:33541 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T12:49:28,959 DEBUG [RS:0;0450ab8807f5:33541 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0450ab8807f5,33541,1731761368545 2024-11-16T12:49:28,959 DEBUG [RS:0;0450ab8807f5:33541 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0450ab8807f5,33541,1731761368545' 2024-11-16T12:49:28,959 DEBUG [RS:0;0450ab8807f5:33541 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T12:49:28,959 DEBUG [RS:0;0450ab8807f5:33541 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T12:49:28,959 DEBUG [RS:0;0450ab8807f5:33541 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T12:49:28,959 INFO [RS:0;0450ab8807f5:33541 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T12:49:28,959 INFO [RS:0;0450ab8807f5:33541 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-16T12:49:29,059 WARN [0450ab8807f5:46653 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-16T12:49:29,062 INFO [RS:0;0450ab8807f5:33541 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C33541%2C1731761368545, suffix=, logDir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/WALs/0450ab8807f5,33541,1731761368545, archiveDir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/oldWALs, maxLogs=32 2024-11-16T12:49:29,063 INFO [RS:0;0450ab8807f5:33541 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33541%2C1731761368545.1731761369062 2024-11-16T12:49:29,070 INFO [RS:0;0450ab8807f5:33541 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/WALs/0450ab8807f5,33541,1731761368545/0450ab8807f5%2C33541%2C1731761368545.1731761369062 2024-11-16T12:49:29,072 DEBUG [RS:0;0450ab8807f5:33541 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34389:34389),(127.0.0.1/127.0.0.1:33791:33791)] 2024-11-16T12:49:29,309 DEBUG [0450ab8807f5:46653 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T12:49:29,310 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0450ab8807f5,33541,1731761368545 2024-11-16T12:49:29,311 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0450ab8807f5,33541,1731761368545, state=OPENING 2024-11-16T12:49:29,371 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T12:49:29,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:49:29,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:49:29,380 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T12:49:29,380 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:49:29,380 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:49:29,380 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0450ab8807f5,33541,1731761368545}] 2024-11-16T12:49:29,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 
java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:29,534 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T12:49:29,537 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59357, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T12:49:29,542 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T12:49:29,542 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:49:29,544 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C33541%2C1731761368545.meta, suffix=.meta, logDir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/WALs/0450ab8807f5,33541,1731761368545, archiveDir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/oldWALs, maxLogs=32 2024-11-16T12:49:29,545 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33541%2C1731761368545.meta.1731761369544.meta 2024-11-16T12:49:29,550 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/WALs/0450ab8807f5,33541,1731761368545/0450ab8807f5%2C33541%2C1731761368545.meta.1731761369544.meta 2024-11-16T12:49:29,550 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34389:34389),(127.0.0.1/127.0.0.1:33791:33791)] 2024-11-16T12:49:29,551 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:49:29,551 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T12:49:29,551 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T12:49:29,551 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-16T12:49:29,552 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T12:49:29,552 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:49:29,552 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T12:49:29,552 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T12:49:29,553 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T12:49:29,554 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T12:49:29,554 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:29,554 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:49:29,554 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T12:49:29,555 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T12:49:29,555 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:29,556 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:49:29,556 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T12:49:29,557 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T12:49:29,557 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:29,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:49:29,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T12:49:29,558 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T12:49:29,558 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:29,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
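
The CompactionConfiguration entries above are the per-store view of the standard compaction settings. As a brief, illustrative mapping (assumed, not taken from this test's site configuration), a few of the logged values correspond to the usual keys like so:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.200000
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio 5.000000
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period (7 days)
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter 0.500000
      }
    }
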
2024-11-16T12:49:29,559 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T12:49:29,560 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740 2024-11-16T12:49:29,561 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740 2024-11-16T12:49:29,562 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T12:49:29,562 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T12:49:29,563 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T12:49:29,564 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T12:49:29,565 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=779692, jitterRate=-0.008570551872253418}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T12:49:29,565 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T12:49:29,566 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731761369552Writing region info on filesystem at 1731761369552Initializing all the Stores at 1731761369553 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761369553Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761369553Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761369553Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761369553Cleaning up temporary data from old regions at 1731761369562 (+9 ms)Running coprocessor post-open hooks at 1731761369565 (+3 ms)Region opened successfully at 1731761369566 (+1 ms) 2024-11-16T12:49:29,567 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731761369534 2024-11-16T12:49:29,569 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T12:49:29,569 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T12:49:29,570 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0450ab8807f5,33541,1731761368545 2024-11-16T12:49:29,571 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0450ab8807f5,33541,1731761368545, state=OPEN 2024-11-16T12:49:29,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:29,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T12:49:29,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T12:49:29,609 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:49:29,609 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:49:29,609 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0450ab8807f5,33541,1731761368545 2024-11-16T12:49:29,613 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T12:49:29,613 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0450ab8807f5,33541,1731761368545 in 229 msec 2024-11-16T12:49:29,616 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T12:49:29,616 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 708 msec 2024-11-16T12:49:29,617 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:49:29,617 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T12:49:29,618 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T12:49:29,618 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0450ab8807f5,33541,1731761368545, seqNum=-1] 2024-11-16T12:49:29,619 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T12:49:29,620 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51997, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T12:49:29,625 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 759 msec 2024-11-16T12:49:29,626 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to 
report in: status=status unset, state=RUNNING, startTime=1731761369625, completionTime=-1 2024-11-16T12:49:29,626 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T12:49:29,626 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-16T12:49:29,628 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-16T12:49:29,628 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731761429628 2024-11-16T12:49:29,628 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731761489628 2024-11-16T12:49:29,628 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-16T12:49:29,628 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,46653,1731761368406-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:29,628 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,46653,1731761368406-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:29,628 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,46653,1731761368406-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:29,628 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0450ab8807f5:46653, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:29,628 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:29,628 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:29,630 DEBUG [master/0450ab8807f5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T12:49:29,633 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.028sec 2024-11-16T12:49:29,633 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T12:49:29,633 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T12:49:29,633 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T12:49:29,633 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-16T12:49:29,633 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T12:49:29,633 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,46653,1731761368406-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T12:49:29,633 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,46653,1731761368406-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T12:49:29,636 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T12:49:29,636 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T12:49:29,636 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,46653,1731761368406-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:49:29,670 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20775fe3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:49:29,671 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0450ab8807f5,46653,-1 for getting cluster id 2024-11-16T12:49:29,671 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T12:49:29,673 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '65d98722-18b6-493a-8adb-6bf3a88a4f49' 2024-11-16T12:49:29,673 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T12:49:29,673 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "65d98722-18b6-493a-8adb-6bf3a88a4f49" 2024-11-16T12:49:29,673 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4348ee9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:49:29,674 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0450ab8807f5,46653,-1] 2024-11-16T12:49:29,674 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T12:49:29,674 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:49:29,676 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34526, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T12:49:29,677 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f203990, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:49:29,677 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T12:49:29,679 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0450ab8807f5,33541,1731761368545, seqNum=-1] 2024-11-16T12:49:29,679 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T12:49:29,680 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53098, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T12:49:29,683 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0450ab8807f5,46653,1731761368406 2024-11-16T12:49:29,683 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:49:29,686 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T12:49:29,686 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T12:49:29,687 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 0450ab8807f5,46653,1731761368406 2024-11-16T12:49:29,687 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3152bb03 2024-11-16T12:49:29,687 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T12:49:29,688 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34540, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T12:49:29,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46653 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T12:49:29,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46653 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-16T12:49:29,689 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46653 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T12:49:29,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46653 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-16T12:49:29,692 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T12:49:29,692 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:29,692 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46653 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-16T12:49:29,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T12:49:29,694 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T12:49:29,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741835_1011 (size=381) 2024-11-16T12:49:29,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741835_1011 (size=381) 2024-11-16T12:49:29,702 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 56327ae7c4c61355f9927cc610edcd05, NAME => 'TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0 2024-11-16T12:49:29,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741836_1012 (size=64) 2024-11-16T12:49:29,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741836_1012 (size=64) 2024-11-16T12:49:29,709 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:49:29,709 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 56327ae7c4c61355f9927cc610edcd05, disabling compactions & flushes 2024-11-16T12:49:29,709 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. 2024-11-16T12:49:29,709 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. 2024-11-16T12:49:29,709 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. after waiting 0 ms 2024-11-16T12:49:29,709 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. 2024-11-16T12:49:29,709 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. 2024-11-16T12:49:29,709 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 56327ae7c4c61355f9927cc610edcd05: Waiting for close lock at 1731761369709Disabling compacts and flushes for region at 1731761369709Disabling writes for close at 1731761369709Writing region close event to WAL at 1731761369709Closed at 1731761369709 2024-11-16T12:49:29,710 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T12:49:29,711 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731761369710"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731761369710"}]},"ts":"1731761369710"} 2024-11-16T12:49:29,713 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
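
The create-table request above ('TestLogRolling-testLogRolling' with a single 'info' family, plus the TableDescriptorChecker warnings about the deliberately small MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192) corresponds to an ordinary Admin.createTable call. The following is a hedged, illustrative sketch of an equivalent request through the public client API, reusing the table name and sizes visible in the log; it is not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateLogRollingTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              // Tiny limits like those flagged by TableDescriptorChecker above,
              // used only to provoke frequent flushes and splits in a test.
              .setMaxFileSize(786432L)
              .setMemStoreFlushSize(8192L)
              .build();
          admin.createTable(td);
        }
      }
    }
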
2024-11-16T12:49:29,714 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T12:49:29,715 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731761369714"}]},"ts":"1731761369714"} 2024-11-16T12:49:29,717 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-16T12:49:29,717 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56327ae7c4c61355f9927cc610edcd05, ASSIGN}] 2024-11-16T12:49:29,719 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56327ae7c4c61355f9927cc610edcd05, ASSIGN 2024-11-16T12:49:29,720 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56327ae7c4c61355f9927cc610edcd05, ASSIGN; state=OFFLINE, location=0450ab8807f5,33541,1731761368545; forceNewPlan=false, retain=false 2024-11-16T12:49:29,871 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=56327ae7c4c61355f9927cc610edcd05, regionState=OPENING, regionLocation=0450ab8807f5,33541,1731761368545 2024-11-16T12:49:29,875 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56327ae7c4c61355f9927cc610edcd05, ASSIGN because future has completed 2024-11-16T12:49:29,875 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 56327ae7c4c61355f9927cc610edcd05, server=0450ab8807f5,33541,1731761368545}] 2024-11-16T12:49:30,034 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. 
2024-11-16T12:49:30,034 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 56327ae7c4c61355f9927cc610edcd05, NAME => 'TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05.', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:49:30,035 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:30,035 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:49:30,035 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:30,035 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:30,038 INFO [StoreOpener-56327ae7c4c61355f9927cc610edcd05-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:30,040 INFO [StoreOpener-56327ae7c4c61355f9927cc610edcd05-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 56327ae7c4c61355f9927cc610edcd05 columnFamilyName info 2024-11-16T12:49:30,040 DEBUG [StoreOpener-56327ae7c4c61355f9927cc610edcd05-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:30,040 INFO [StoreOpener-56327ae7c4c61355f9927cc610edcd05-1 {}] regionserver.HStore(327): Store=56327ae7c4c61355f9927cc610edcd05/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:49:30,041 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:30,042 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:30,042 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:30,043 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:30,043 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:30,045 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:30,048 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:49:30,048 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 56327ae7c4c61355f9927cc610edcd05; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=734681, jitterRate=-0.06580498814582825}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T12:49:30,048 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:30,049 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 56327ae7c4c61355f9927cc610edcd05: Running coprocessor pre-open hook at 1731761370035Writing region info on filesystem at 1731761370035Initializing all the Stores at 1731761370037 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761370037Cleaning up temporary data from old regions at 1731761370043 (+6 ms)Running coprocessor post-open hooks at 1731761370048 (+5 ms)Region opened successfully at 1731761370049 (+1 ms) 2024-11-16T12:49:30,050 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05., pid=6, masterSystemTime=1731761370029 2024-11-16T12:49:30,053 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. 
2024-11-16T12:49:30,053 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. 2024-11-16T12:49:30,054 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=56327ae7c4c61355f9927cc610edcd05, regionState=OPEN, openSeqNum=2, regionLocation=0450ab8807f5,33541,1731761368545 2024-11-16T12:49:30,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 56327ae7c4c61355f9927cc610edcd05, server=0450ab8807f5,33541,1731761368545 because future has completed 2024-11-16T12:49:30,062 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T12:49:30,062 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 56327ae7c4c61355f9927cc610edcd05, server=0450ab8807f5,33541,1731761368545 in 184 msec 2024-11-16T12:49:30,065 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T12:49:30,065 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56327ae7c4c61355f9927cc610edcd05, ASSIGN in 345 msec 2024-11-16T12:49:30,066 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T12:49:30,066 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731761370066"}]},"ts":"1731761370066"} 2024-11-16T12:49:30,069 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-16T12:49:30,070 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T12:49:30,072 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 381 msec 2024-11-16T12:49:30,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:30,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:31,106 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,106 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,106 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,107 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,107 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,107 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,108 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,108 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,136 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,136 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,140 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:31,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:31,646 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T12:49:31,647 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,647 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,647 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,648 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,648 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,648 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,686 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,687 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,687 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,688 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,688 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,688 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,695 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,695 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,695 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:31,700 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:32,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:32,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:33,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:33,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:34,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:34,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:34,922 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T12:49:34,923 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-16T12:49:35,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:35,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:36,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:36,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:36,950 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T12:49:36,950 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T12:49:36,951 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T12:49:36,951 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T12:49:36,951 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T12:49:36,951 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-16T12:49:36,952 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-16T12:49:36,952 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-16T12:49:37,401 WARN [Close-WAL-Writer-0 {}] 
util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:37,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:38,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:38,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:39,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:39,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:39,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T12:49:39,785 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-16T12:49:39,785 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-16T12:49:39,788 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-16T12:49:39,788 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. 2024-11-16T12:49:39,791 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05., hostname=0450ab8807f5,33541,1731761368545, seqNum=2] 2024-11-16T12:49:39,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:39,806 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 56327ae7c4c61355f9927cc610edcd05 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T12:49:39,826 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/59de560982b84ad4adcbe307b6b8707c is 1080, key is row0001/info:/1731761379792/Put/seqid=0 2024-11-16T12:49:39,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741837_1013 (size=12509) 2024-11-16T12:49:39,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741837_1013 (size=12509) 2024-11-16T12:49:39,850 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/59de560982b84ad4adcbe307b6b8707c 2024-11-16T12:49:39,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=56327ae7c4c61355f9927cc610edcd05, server=0450ab8807f5,33541,1731761368545 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-16T12:49:39,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53098 deadline: 1731761389852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=56327ae7c4c61355f9927cc610edcd05, server=0450ab8807f5,33541,1731761368545 2024-11-16T12:49:39,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/59de560982b84ad4adcbe307b6b8707c as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/59de560982b84ad4adcbe307b6b8707c 2024-11-16T12:49:39,869 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/59de560982b84ad4adcbe307b6b8707c, entries=7, sequenceid=11, filesize=12.2 K 2024-11-16T12:49:39,871 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 56327ae7c4c61355f9927cc610edcd05 in 65ms, sequenceid=11, compaction requested=false 2024-11-16T12:49:39,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 56327ae7c4c61355f9927cc610edcd05: 2024-11-16T12:49:39,879 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05., hostname=0450ab8807f5,33541,1731761368545, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05., hostname=0450ab8807f5,33541,1731761368545, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=56327ae7c4c61355f9927cc610edcd05, server=0450ab8807f5,33541,1731761368545 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T12:49:39,880 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05., hostname=0450ab8807f5,33541,1731761368545, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=56327ae7c4c61355f9927cc610edcd05, server=0450ab8807f5,33541,1731761368545 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T12:49:39,880 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05., hostname=0450ab8807f5,33541,1731761368545, seqNum=2 because the exception is null or not the one we care about 2024-11-16T12:49:40,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:40,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:41,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:41,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:42,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:42,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:43,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:43,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:44,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:44,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:45,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:45,603 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:46,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:46,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:47,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:47,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:48,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:48,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:49,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:49,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:49,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:49,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 56327ae7c4c61355f9927cc610edcd05 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-16T12:49:49,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/cc015bbc746d49f9a27ea2887c348ac4 is 1080, key is row0008/info:/1731761379807/Put/seqid=0 2024-11-16T12:49:49,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741838_1014 (size=29761) 2024-11-16T12:49:49,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741838_1014 (size=29761) 2024-11-16T12:49:49,932 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/cc015bbc746d49f9a27ea2887c348ac4 2024-11-16T12:49:49,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/cc015bbc746d49f9a27ea2887c348ac4 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/cc015bbc746d49f9a27ea2887c348ac4 2024-11-16T12:49:49,945 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/cc015bbc746d49f9a27ea2887c348ac4, entries=23, sequenceid=37, filesize=29.1 K 2024-11-16T12:49:49,946 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 56327ae7c4c61355f9927cc610edcd05 in 29ms, sequenceid=37, compaction requested=false 2024-11-16T12:49:49,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 56327ae7c4c61355f9927cc610edcd05: 2024-11-16T12:49:49,946 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-16T12:49:49,946 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:49:49,946 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/cc015bbc746d49f9a27ea2887c348ac4 because midkey is the same as first or last row 2024-11-16T12:49:50,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:50,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:51,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:51,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:51,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:51,934 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 56327ae7c4c61355f9927cc610edcd05 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T12:49:51,940 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/1ebec90106a44f29bd108e7000c00705 is 1080, key is row0031/info:/1731761389918/Put/seqid=0 2024-11-16T12:49:51,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741839_1015 (size=12509) 2024-11-16T12:49:51,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741839_1015 (size=12509) 2024-11-16T12:49:51,953 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/1ebec90106a44f29bd108e7000c00705 2024-11-16T12:49:51,960 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/1ebec90106a44f29bd108e7000c00705 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/1ebec90106a44f29bd108e7000c00705 2024-11-16T12:49:51,967 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/1ebec90106a44f29bd108e7000c00705, entries=7, sequenceid=47, filesize=12.2 K 2024-11-16T12:49:51,968 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 56327ae7c4c61355f9927cc610edcd05 in 34ms, sequenceid=47, compaction requested=true 2024-11-16T12:49:51,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 56327ae7c4c61355f9927cc610edcd05: 2024-11-16T12:49:51,968 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-16T12:49:51,968 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:49:51,968 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/cc015bbc746d49f9a27ea2887c348ac4 because midkey is the same as first or last row 2024-11-16T12:49:51,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 56327ae7c4c61355f9927cc610edcd05:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-16T12:49:51,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:49:51,968 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T12:49:51,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:51,969 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 56327ae7c4c61355f9927cc610edcd05 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-16T12:49:51,970 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T12:49:51,970 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1541): 56327ae7c4c61355f9927cc610edcd05/info is initiating minor compaction (all files) 2024-11-16T12:49:51,970 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 56327ae7c4c61355f9927cc610edcd05/info in TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. 2024-11-16T12:49:51,970 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/59de560982b84ad4adcbe307b6b8707c, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/cc015bbc746d49f9a27ea2887c348ac4, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/1ebec90106a44f29bd108e7000c00705] into tmpdir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp, totalSize=53.5 K 2024-11-16T12:49:51,971 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 59de560982b84ad4adcbe307b6b8707c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731761379792 2024-11-16T12:49:51,971 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting cc015bbc746d49f9a27ea2887c348ac4, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731761379807 2024-11-16T12:49:51,972 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1ebec90106a44f29bd108e7000c00705, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731761389918 2024-11-16T12:49:51,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/836014b2b0f44119b9fcfb5941ddfb8c is 1080, key is row0038/info:/1731761391935/Put/seqid=0 
2024-11-16T12:49:51,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741840_1016 (size=21141) 2024-11-16T12:49:51,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741840_1016 (size=21141) 2024-11-16T12:49:51,980 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/836014b2b0f44119b9fcfb5941ddfb8c 2024-11-16T12:49:51,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/836014b2b0f44119b9fcfb5941ddfb8c as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/836014b2b0f44119b9fcfb5941ddfb8c 2024-11-16T12:49:51,993 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 56327ae7c4c61355f9927cc610edcd05#info#compaction#59 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T12:49:51,994 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/4922a1cd03bf446b931377a086928cf1 is 1080, key is row0001/info:/1731761379792/Put/seqid=0 2024-11-16T12:49:51,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/836014b2b0f44119b9fcfb5941ddfb8c, entries=15, sequenceid=65, filesize=20.6 K 2024-11-16T12:49:51,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741841_1017 (size=44978) 2024-11-16T12:49:52,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741841_1017 (size=44978) 2024-11-16T12:49:52,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for 56327ae7c4c61355f9927cc610edcd05 in 31ms, sequenceid=65, compaction requested=false 2024-11-16T12:49:52,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 56327ae7c4c61355f9927cc610edcd05: 2024-11-16T12:49:52,000 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.1 K, sizeToCheck=16.0 K 2024-11-16T12:49:52,000 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:49:52,000 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/cc015bbc746d49f9a27ea2887c348ac4 because midkey is the same as first or last row 2024-11-16T12:49:52,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:52,001 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 56327ae7c4c61355f9927cc610edcd05 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T12:49:52,005 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/aabe0bfcd17c457a9d6fe628bc4f5d12 is 1080, key is row0053/info:/1731761391971/Put/seqid=0 2024-11-16T12:49:52,006 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/4922a1cd03bf446b931377a086928cf1 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/4922a1cd03bf446b931377a086928cf1 2024-11-16T12:49:52,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741842_1018 (size=17894) 2024-11-16T12:49:52,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741842_1018 (size=17894) 2024-11-16T12:49:52,010 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/aabe0bfcd17c457a9d6fe628bc4f5d12 2024-11-16T12:49:52,014 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 56327ae7c4c61355f9927cc610edcd05/info of 56327ae7c4c61355f9927cc610edcd05 into 4922a1cd03bf446b931377a086928cf1(size=43.9 K), total size for store is 64.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-16T12:49:52,014 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 56327ae7c4c61355f9927cc610edcd05: 2024-11-16T12:49:52,014 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05., storeName=56327ae7c4c61355f9927cc610edcd05/info, priority=13, startTime=1731761391968; duration=0sec 2024-11-16T12:49:52,014 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-16T12:49:52,014 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:49:52,014 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/4922a1cd03bf446b931377a086928cf1 because midkey is the same as first or last row 2024-11-16T12:49:52,015 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-16T12:49:52,015 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:49:52,015 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/4922a1cd03bf446b931377a086928cf1 because midkey is the same as first or last row 2024-11-16T12:49:52,015 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-16T12:49:52,015 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:49:52,015 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/4922a1cd03bf446b931377a086928cf1 because midkey is the same as first or last row 2024-11-16T12:49:52,015 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:49:52,015 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 56327ae7c4c61355f9927cc610edcd05:info 2024-11-16T12:49:52,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/aabe0bfcd17c457a9d6fe628bc4f5d12 as 
hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/aabe0bfcd17c457a9d6fe628bc4f5d12 2024-11-16T12:49:52,021 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/aabe0bfcd17c457a9d6fe628bc4f5d12, entries=12, sequenceid=80, filesize=17.5 K 2024-11-16T12:49:52,022 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=0 B/0 for 56327ae7c4c61355f9927cc610edcd05 in 21ms, sequenceid=80, compaction requested=true 2024-11-16T12:49:52,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 56327ae7c4c61355f9927cc610edcd05: 2024-11-16T12:49:52,022 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=82.0 K, sizeToCheck=16.0 K 2024-11-16T12:49:52,022 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:49:52,022 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/4922a1cd03bf446b931377a086928cf1 because midkey is the same as first or last row 2024-11-16T12:49:52,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 56327ae7c4c61355f9927cc610edcd05:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T12:49:52,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:49:52,022 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T12:49:52,023 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84013 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T12:49:52,023 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1541): 56327ae7c4c61355f9927cc610edcd05/info is initiating minor compaction (all files) 2024-11-16T12:49:52,023 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 56327ae7c4c61355f9927cc610edcd05/info in TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. 
2024-11-16T12:49:52,024 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/4922a1cd03bf446b931377a086928cf1, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/836014b2b0f44119b9fcfb5941ddfb8c, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/aabe0bfcd17c457a9d6fe628bc4f5d12] into tmpdir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp, totalSize=82.0 K 2024-11-16T12:49:52,024 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4922a1cd03bf446b931377a086928cf1, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731761379792 2024-11-16T12:49:52,024 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 836014b2b0f44119b9fcfb5941ddfb8c, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=65, earliestPutTs=1731761391935 2024-11-16T12:49:52,025 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting aabe0bfcd17c457a9d6fe628bc4f5d12, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1731761391971 2024-11-16T12:49:52,036 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 56327ae7c4c61355f9927cc610edcd05#info#compaction#61 average throughput is 32.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T12:49:52,036 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/1df159e6bc5249a097db4245ac5b09a0 is 1080, key is row0001/info:/1731761379792/Put/seqid=0 2024-11-16T12:49:52,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741843_1019 (size=74301) 2024-11-16T12:49:52,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741843_1019 (size=74301) 2024-11-16T12:49:52,051 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/.tmp/info/1df159e6bc5249a097db4245ac5b09a0 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/1df159e6bc5249a097db4245ac5b09a0 2024-11-16T12:49:52,058 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 56327ae7c4c61355f9927cc610edcd05/info of 56327ae7c4c61355f9927cc610edcd05 into 1df159e6bc5249a097db4245ac5b09a0(size=72.6 K), total size for store is 72.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T12:49:52,058 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 56327ae7c4c61355f9927cc610edcd05: 2024-11-16T12:49:52,058 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05., storeName=56327ae7c4c61355f9927cc610edcd05/info, priority=13, startTime=1731761392022; duration=0sec 2024-11-16T12:49:52,058 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-16T12:49:52,058 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:49:52,058 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-16T12:49:52,058 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:49:52,058 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-16T12:49:52,058 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T12:49:52,059 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:49:52,059 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:49:52,060 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 56327ae7c4c61355f9927cc610edcd05:info 2024-11-16T12:49:52,061 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46653 {}] assignment.AssignmentManager(1363): Split request from 0450ab8807f5,33541,1731761368545, parent={ENCODED => 56327ae7c4c61355f9927cc610edcd05, NAME => 'TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-16T12:49:52,066 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46653 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=0450ab8807f5,33541,1731761368545 2024-11-16T12:49:52,069 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46653 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56327ae7c4c61355f9927cc610edcd05, daughterA=e8eaec5ba20e3ee40e68e1509c73db72, daughterB=753427f7c0fbf1885adf61308024b068 2024-11-16T12:49:52,071 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56327ae7c4c61355f9927cc610edcd05, daughterA=e8eaec5ba20e3ee40e68e1509c73db72, daughterB=753427f7c0fbf1885adf61308024b068 2024-11-16T12:49:52,071 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56327ae7c4c61355f9927cc610edcd05, daughterA=e8eaec5ba20e3ee40e68e1509c73db72, daughterB=753427f7c0fbf1885adf61308024b068 2024-11-16T12:49:52,071 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56327ae7c4c61355f9927cc610edcd05, daughterA=e8eaec5ba20e3ee40e68e1509c73db72, daughterB=753427f7c0fbf1885adf61308024b068 2024-11-16T12:49:52,078 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56327ae7c4c61355f9927cc610edcd05, UNASSIGN}] 2024-11-16T12:49:52,080 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56327ae7c4c61355f9927cc610edcd05, UNASSIGN 2024-11-16T12:49:52,082 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=56327ae7c4c61355f9927cc610edcd05, regionState=CLOSING, regionLocation=0450ab8807f5,33541,1731761368545 2024-11-16T12:49:52,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56327ae7c4c61355f9927cc610edcd05, UNASSIGN because future has completed 2024-11-16T12:49:52,085 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-16T12:49:52,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 56327ae7c4c61355f9927cc610edcd05, server=0450ab8807f5,33541,1731761368545}] 2024-11-16T12:49:52,242 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:52,242 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-16T12:49:52,243 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 56327ae7c4c61355f9927cc610edcd05, disabling compactions & flushes 2024-11-16T12:49:52,243 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. 2024-11-16T12:49:52,243 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. 2024-11-16T12:49:52,243 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. after waiting 0 ms 2024-11-16T12:49:52,243 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. 
2024-11-16T12:49:52,244 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/59de560982b84ad4adcbe307b6b8707c, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/cc015bbc746d49f9a27ea2887c348ac4, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/4922a1cd03bf446b931377a086928cf1, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/1ebec90106a44f29bd108e7000c00705, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/836014b2b0f44119b9fcfb5941ddfb8c, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/aabe0bfcd17c457a9d6fe628bc4f5d12] to archive 2024-11-16T12:49:52,245 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-16T12:49:52,246 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/59de560982b84ad4adcbe307b6b8707c to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/59de560982b84ad4adcbe307b6b8707c 2024-11-16T12:49:52,248 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/cc015bbc746d49f9a27ea2887c348ac4 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/cc015bbc746d49f9a27ea2887c348ac4 2024-11-16T12:49:52,250 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/4922a1cd03bf446b931377a086928cf1 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/4922a1cd03bf446b931377a086928cf1 2024-11-16T12:49:52,251 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/1ebec90106a44f29bd108e7000c00705 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/1ebec90106a44f29bd108e7000c00705 2024-11-16T12:49:52,252 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/836014b2b0f44119b9fcfb5941ddfb8c to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/836014b2b0f44119b9fcfb5941ddfb8c 2024-11-16T12:49:52,253 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/aabe0bfcd17c457a9d6fe628bc4f5d12 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/aabe0bfcd17c457a9d6fe628bc4f5d12 2024-11-16T12:49:52,259 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=1 2024-11-16T12:49:52,259 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. 
2024-11-16T12:49:52,260 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 56327ae7c4c61355f9927cc610edcd05: Waiting for close lock at 1731761392243Running coprocessor pre-close hooks at 1731761392243Disabling compacts and flushes for region at 1731761392243Disabling writes for close at 1731761392243Writing region close event to WAL at 1731761392255 (+12 ms)Running coprocessor post-close hooks at 1731761392259 (+4 ms)Closed at 1731761392259 2024-11-16T12:49:52,262 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:52,263 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=56327ae7c4c61355f9927cc610edcd05, regionState=CLOSED 2024-11-16T12:49:52,265 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 56327ae7c4c61355f9927cc610edcd05, server=0450ab8807f5,33541,1731761368545 because future has completed 2024-11-16T12:49:52,267 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-16T12:49:52,268 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 56327ae7c4c61355f9927cc610edcd05, server=0450ab8807f5,33541,1731761368545 in 181 msec 2024-11-16T12:49:52,270 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-16T12:49:52,270 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56327ae7c4c61355f9927cc610edcd05, UNASSIGN in 190 msec 2024-11-16T12:49:52,277 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:52,280 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 1 storefiles, region=56327ae7c4c61355f9927cc610edcd05, threads=1 2024-11-16T12:49:52,282 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/1df159e6bc5249a097db4245ac5b09a0 for region: 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:52,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741844_1020 (size=27) 2024-11-16T12:49:52,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741844_1020 (size=27) 2024-11-16T12:49:52,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741845_1021 (size=27) 2024-11-16T12:49:52,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741845_1021 (size=27) 2024-11-16T12:49:52,312 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 
splitting complete for store file: hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/1df159e6bc5249a097db4245ac5b09a0 for region: 56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:49:52,315 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 56327ae7c4c61355f9927cc610edcd05 Daughter A: [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/e8eaec5ba20e3ee40e68e1509c73db72/info/1df159e6bc5249a097db4245ac5b09a0.56327ae7c4c61355f9927cc610edcd05] storefiles, Daughter B: [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/1df159e6bc5249a097db4245ac5b09a0.56327ae7c4c61355f9927cc610edcd05] storefiles. 2024-11-16T12:49:52,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741846_1022 (size=71) 2024-11-16T12:49:52,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741846_1022 (size=71) 2024-11-16T12:49:52,325 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:52,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741847_1023 (size=71) 2024-11-16T12:49:52,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741847_1023 (size=71) 2024-11-16T12:49:52,338 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:52,346 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/e8eaec5ba20e3ee40e68e1509c73db72/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-16T12:49:52,348 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-16T12:49:52,350 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731761392350"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731761392350"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731761392350"}]},"ts":"1731761392350"} 2024-11-16T12:49:52,351 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731761392350"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731761392350"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731761392350"}]},"ts":"1731761392350"} 
2024-11-16T12:49:52,351 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731761392350"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731761392350"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731761392350"}]},"ts":"1731761392350"} 2024-11-16T12:49:52,370 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e8eaec5ba20e3ee40e68e1509c73db72, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=753427f7c0fbf1885adf61308024b068, ASSIGN}] 2024-11-16T12:49:52,372 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e8eaec5ba20e3ee40e68e1509c73db72, ASSIGN 2024-11-16T12:49:52,372 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=753427f7c0fbf1885adf61308024b068, ASSIGN 2024-11-16T12:49:52,373 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e8eaec5ba20e3ee40e68e1509c73db72, ASSIGN; state=SPLITTING_NEW, location=0450ab8807f5,33541,1731761368545; forceNewPlan=false, retain=false 2024-11-16T12:49:52,373 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=753427f7c0fbf1885adf61308024b068, ASSIGN; state=SPLITTING_NEW, location=0450ab8807f5,33541,1731761368545; forceNewPlan=false, retain=false 2024-11-16T12:49:52,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:52,523 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=e8eaec5ba20e3ee40e68e1509c73db72, regionState=OPENING, regionLocation=0450ab8807f5,33541,1731761368545 2024-11-16T12:49:52,523 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=753427f7c0fbf1885adf61308024b068, regionState=OPENING, regionLocation=0450ab8807f5,33541,1731761368545 2024-11-16T12:49:52,526 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=753427f7c0fbf1885adf61308024b068, ASSIGN because future has completed 2024-11-16T12:49:52,527 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 753427f7c0fbf1885adf61308024b068, server=0450ab8807f5,33541,1731761368545}] 2024-11-16T12:49:52,528 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e8eaec5ba20e3ee40e68e1509c73db72, ASSIGN because future has completed 2024-11-16T12:49:52,528 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure e8eaec5ba20e3ee40e68e1509c73db72, server=0450ab8807f5,33541,1731761368545}] 2024-11-16T12:49:52,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:52,682 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 
2024-11-16T12:49:52,683 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 753427f7c0fbf1885adf61308024b068, NAME => 'TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-16T12:49:52,683 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 753427f7c0fbf1885adf61308024b068 2024-11-16T12:49:52,683 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:49:52,683 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 753427f7c0fbf1885adf61308024b068 2024-11-16T12:49:52,683 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 753427f7c0fbf1885adf61308024b068 2024-11-16T12:49:52,684 INFO [StoreOpener-753427f7c0fbf1885adf61308024b068-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 753427f7c0fbf1885adf61308024b068 2024-11-16T12:49:52,685 INFO [StoreOpener-753427f7c0fbf1885adf61308024b068-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 753427f7c0fbf1885adf61308024b068 columnFamilyName info 2024-11-16T12:49:52,685 DEBUG [StoreOpener-753427f7c0fbf1885adf61308024b068-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:52,695 DEBUG [StoreOpener-753427f7c0fbf1885adf61308024b068-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/1df159e6bc5249a097db4245ac5b09a0.56327ae7c4c61355f9927cc610edcd05->hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/1df159e6bc5249a097db4245ac5b09a0-top 2024-11-16T12:49:52,695 INFO [StoreOpener-753427f7c0fbf1885adf61308024b068-1 {}] regionserver.HStore(327): Store=753427f7c0fbf1885adf61308024b068/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:49:52,695 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 753427f7c0fbf1885adf61308024b068 2024-11-16T12:49:52,696 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068 2024-11-16T12:49:52,697 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068 2024-11-16T12:49:52,697 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 753427f7c0fbf1885adf61308024b068 2024-11-16T12:49:52,697 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 753427f7c0fbf1885adf61308024b068 2024-11-16T12:49:52,699 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 753427f7c0fbf1885adf61308024b068 2024-11-16T12:49:52,699 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 753427f7c0fbf1885adf61308024b068; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=813939, jitterRate=0.034977853298187256}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T12:49:52,700 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 753427f7c0fbf1885adf61308024b068 2024-11-16T12:49:52,700 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 753427f7c0fbf1885adf61308024b068: Running coprocessor pre-open hook at 1731761392683Writing region info on filesystem at 1731761392683Initializing all the Stores at 1731761392684 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761392684Cleaning up temporary data from old regions at 1731761392697 (+13 ms)Running coprocessor post-open hooks at 1731761392700 (+3 ms)Region opened successfully at 1731761392700 2024-11-16T12:49:52,701 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068., pid=12, masterSystemTime=1731761392679 2024-11-16T12:49:52,701 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add 
compact mark for store 753427f7c0fbf1885adf61308024b068:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T12:49:52,701 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-16T12:49:52,701 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:49:52,702 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 2024-11-16T12:49:52,702 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1541): 753427f7c0fbf1885adf61308024b068/info is initiating minor compaction (all files) 2024-11-16T12:49:52,702 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 753427f7c0fbf1885adf61308024b068/info in TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 2024-11-16T12:49:52,702 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/1df159e6bc5249a097db4245ac5b09a0.56327ae7c4c61355f9927cc610edcd05->hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/1df159e6bc5249a097db4245ac5b09a0-top] into tmpdir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp, totalSize=72.6 K 2024-11-16T12:49:52,703 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1df159e6bc5249a097db4245ac5b09a0.56327ae7c4c61355f9927cc610edcd05, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1731761379792 2024-11-16T12:49:52,703 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 2024-11-16T12:49:52,703 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 2024-11-16T12:49:52,704 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72. 
2024-11-16T12:49:52,704 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => e8eaec5ba20e3ee40e68e1509c73db72, NAME => 'TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-16T12:49:52,704 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling e8eaec5ba20e3ee40e68e1509c73db72 2024-11-16T12:49:52,704 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:49:52,704 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for e8eaec5ba20e3ee40e68e1509c73db72 2024-11-16T12:49:52,704 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for e8eaec5ba20e3ee40e68e1509c73db72 2024-11-16T12:49:52,705 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=753427f7c0fbf1885adf61308024b068, regionState=OPEN, openSeqNum=86, regionLocation=0450ab8807f5,33541,1731761368545 2024-11-16T12:49:52,707 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-16T12:49:52,707 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-16T12:49:52,708 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-16T12:49:52,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 753427f7c0fbf1885adf61308024b068, server=0450ab8807f5,33541,1731761368545 because future has completed 2024-11-16T12:49:52,714 INFO [StoreOpener-e8eaec5ba20e3ee40e68e1509c73db72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e8eaec5ba20e3ee40e68e1509c73db72 2024-11-16T12:49:52,718 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-16T12:49:52,718 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 753427f7c0fbf1885adf61308024b068, server=0450ab8807f5,33541,1731761368545 in 189 msec 2024-11-16T12:49:52,720 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=753427f7c0fbf1885adf61308024b068, ASSIGN in 348 msec 2024-11-16T12:49:52,727 INFO [StoreOpener-e8eaec5ba20e3ee40e68e1509c73db72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e8eaec5ba20e3ee40e68e1509c73db72 columnFamilyName info 2024-11-16T12:49:52,727 DEBUG [StoreOpener-e8eaec5ba20e3ee40e68e1509c73db72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:49:52,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/.tmp/info/b43283e1f9224d6a886e219edbae778b is 193, key is TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068./info:regioninfo/1731761392705/Put/seqid=0 2024-11-16T12:49:52,732 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 753427f7c0fbf1885adf61308024b068#info#compaction#63 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T12:49:52,733 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/f37eceb1f8094c66b2f787e2ea2f0939 is 1080, key is row0062/info:/1731761391994/Put/seqid=0 2024-11-16T12:49:52,737 DEBUG [StoreOpener-e8eaec5ba20e3ee40e68e1509c73db72-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/e8eaec5ba20e3ee40e68e1509c73db72/info/1df159e6bc5249a097db4245ac5b09a0.56327ae7c4c61355f9927cc610edcd05->hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/1df159e6bc5249a097db4245ac5b09a0-bottom 2024-11-16T12:49:52,737 INFO [StoreOpener-e8eaec5ba20e3ee40e68e1509c73db72-1 {}] regionserver.HStore(327): Store=e8eaec5ba20e3ee40e68e1509c73db72/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:49:52,738 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for e8eaec5ba20e3ee40e68e1509c73db72 2024-11-16T12:49:52,738 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/e8eaec5ba20e3ee40e68e1509c73db72 2024-11-16T12:49:52,739 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/e8eaec5ba20e3ee40e68e1509c73db72 2024-11-16T12:49:52,740 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for e8eaec5ba20e3ee40e68e1509c73db72 2024-11-16T12:49:52,740 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for e8eaec5ba20e3ee40e68e1509c73db72 2024-11-16T12:49:52,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741848_1024 (size=9882) 2024-11-16T12:49:52,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741848_1024 (size=9882) 2024-11-16T12:49:52,742 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/.tmp/info/b43283e1f9224d6a886e219edbae778b 2024-11-16T12:49:52,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741849_1025 (size=8260) 2024-11-16T12:49:52,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42515 is added to blk_1073741849_1025 (size=8260) 2024-11-16T12:49:52,743 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for e8eaec5ba20e3ee40e68e1509c73db72 2024-11-16T12:49:52,745 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened e8eaec5ba20e3ee40e68e1509c73db72; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694929, jitterRate=-0.11635267734527588}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T12:49:52,745 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e8eaec5ba20e3ee40e68e1509c73db72 2024-11-16T12:49:52,745 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for e8eaec5ba20e3ee40e68e1509c73db72: Running coprocessor pre-open hook at 1731761392704Writing region info on filesystem at 1731761392704Initializing all the Stores at 1731761392705 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761392705Cleaning up temporary data from old regions at 1731761392740 (+35 ms)Running coprocessor post-open hooks at 1731761392745 (+5 ms)Region opened successfully at 1731761392745 2024-11-16T12:49:52,746 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72., pid=13, masterSystemTime=1731761392679 2024-11-16T12:49:52,746 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store e8eaec5ba20e3ee40e68e1509c73db72:info, priority=-2147483648, current under compaction store size is 2 2024-11-16T12:49:52,746 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:49:52,746 DEBUG [RS:0;0450ab8807f5:33541-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-16T12:49:52,747 INFO [RS:0;0450ab8807f5:33541-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72. 
2024-11-16T12:49:52,748 DEBUG [RS:0;0450ab8807f5:33541-longCompactions-0 {}] regionserver.HStore(1541): e8eaec5ba20e3ee40e68e1509c73db72/info is initiating minor compaction (all files) 2024-11-16T12:49:52,748 INFO [RS:0;0450ab8807f5:33541-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e8eaec5ba20e3ee40e68e1509c73db72/info in TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72. 2024-11-16T12:49:52,748 INFO [RS:0;0450ab8807f5:33541-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/e8eaec5ba20e3ee40e68e1509c73db72/info/1df159e6bc5249a097db4245ac5b09a0.56327ae7c4c61355f9927cc610edcd05->hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/1df159e6bc5249a097db4245ac5b09a0-bottom] into tmpdir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/e8eaec5ba20e3ee40e68e1509c73db72/.tmp, totalSize=72.6 K 2024-11-16T12:49:52,749 DEBUG [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72. 2024-11-16T12:49:52,749 INFO [RS_OPEN_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72. 2024-11-16T12:49:52,749 DEBUG [RS:0;0450ab8807f5:33541-longCompactions-0 {}] compactions.Compactor(225): Compacting 1df159e6bc5249a097db4245ac5b09a0.56327ae7c4c61355f9927cc610edcd05, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1731761379792 2024-11-16T12:49:52,750 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=e8eaec5ba20e3ee40e68e1509c73db72, regionState=OPEN, openSeqNum=86, regionLocation=0450ab8807f5,33541,1731761368545 2024-11-16T12:49:52,752 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/f37eceb1f8094c66b2f787e2ea2f0939 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f37eceb1f8094c66b2f787e2ea2f0939 2024-11-16T12:49:52,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure e8eaec5ba20e3ee40e68e1509c73db72, server=0450ab8807f5,33541,1731761368545 because future has completed 2024-11-16T12:49:52,763 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-16T12:49:52,763 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure e8eaec5ba20e3ee40e68e1509c73db72, server=0450ab8807f5,33541,1731761368545 in 227 msec 2024-11-16T12:49:52,766 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume 
processing ppid=7 2024-11-16T12:49:52,766 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e8eaec5ba20e3ee40e68e1509c73db72, ASSIGN in 393 msec 2024-11-16T12:49:52,766 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 753427f7c0fbf1885adf61308024b068/info of 753427f7c0fbf1885adf61308024b068 into f37eceb1f8094c66b2f787e2ea2f0939(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T12:49:52,766 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:49:52,766 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068., storeName=753427f7c0fbf1885adf61308024b068/info, priority=15, startTime=1731761392701; duration=0sec 2024-11-16T12:49:52,766 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:49:52,766 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 753427f7c0fbf1885adf61308024b068:info 2024-11-16T12:49:52,767 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56327ae7c4c61355f9927cc610edcd05, daughterA=e8eaec5ba20e3ee40e68e1509c73db72, daughterB=753427f7c0fbf1885adf61308024b068 in 700 msec 2024-11-16T12:49:52,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/.tmp/ns/5f5e31dced7444069615ee9c6c446c89 is 43, key is default/ns:d/1731761369620/Put/seqid=0 2024-11-16T12:49:52,777 INFO [RS:0;0450ab8807f5:33541-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e8eaec5ba20e3ee40e68e1509c73db72#info#compaction#65 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T12:49:52,778 DEBUG [RS:0;0450ab8807f5:33541-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/e8eaec5ba20e3ee40e68e1509c73db72/.tmp/info/b2ec31b49ede48318f68b43ad91aabe1 is 1080, key is row0001/info:/1731761379792/Put/seqid=0 2024-11-16T12:49:52,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741850_1026 (size=5153) 2024-11-16T12:49:52,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741850_1026 (size=5153) 2024-11-16T12:49:52,782 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/.tmp/ns/5f5e31dced7444069615ee9c6c446c89 2024-11-16T12:49:52,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741851_1027 (size=70862) 2024-11-16T12:49:52,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741851_1027 (size=70862) 2024-11-16T12:49:52,800 DEBUG [RS:0;0450ab8807f5:33541-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/e8eaec5ba20e3ee40e68e1509c73db72/.tmp/info/b2ec31b49ede48318f68b43ad91aabe1 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/e8eaec5ba20e3ee40e68e1509c73db72/info/b2ec31b49ede48318f68b43ad91aabe1 2024-11-16T12:49:52,808 INFO [RS:0;0450ab8807f5:33541-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in e8eaec5ba20e3ee40e68e1509c73db72/info of e8eaec5ba20e3ee40e68e1509c73db72 into b2ec31b49ede48318f68b43ad91aabe1(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-16T12:49:52,808 DEBUG [RS:0;0450ab8807f5:33541-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e8eaec5ba20e3ee40e68e1509c73db72: 2024-11-16T12:49:52,808 INFO [RS:0;0450ab8807f5:33541-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72., storeName=e8eaec5ba20e3ee40e68e1509c73db72/info, priority=15, startTime=1731761392746; duration=0sec 2024-11-16T12:49:52,808 DEBUG [RS:0;0450ab8807f5:33541-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:49:52,808 DEBUG [RS:0;0450ab8807f5:33541-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e8eaec5ba20e3ee40e68e1509c73db72:info 2024-11-16T12:49:52,810 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/.tmp/table/5f13a414d3024ef5b54de27b11499e12 is 65, key is TestLogRolling-testLogRolling/table:state/1731761370066/Put/seqid=0 2024-11-16T12:49:52,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741852_1028 (size=5340) 2024-11-16T12:49:52,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741852_1028 (size=5340) 2024-11-16T12:49:52,815 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/.tmp/table/5f13a414d3024ef5b54de27b11499e12 2024-11-16T12:49:52,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/.tmp/info/b43283e1f9224d6a886e219edbae778b as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/info/b43283e1f9224d6a886e219edbae778b 2024-11-16T12:49:52,827 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/info/b43283e1f9224d6a886e219edbae778b, entries=30, sequenceid=17, filesize=9.7 K 2024-11-16T12:49:52,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/.tmp/ns/5f5e31dced7444069615ee9c6c446c89 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/ns/5f5e31dced7444069615ee9c6c446c89 2024-11-16T12:49:52,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/ns/5f5e31dced7444069615ee9c6c446c89, entries=2, sequenceid=17, filesize=5.0 K 2024-11-16T12:49:52,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/.tmp/table/5f13a414d3024ef5b54de27b11499e12 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/table/5f13a414d3024ef5b54de27b11499e12 2024-11-16T12:49:52,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/table/5f13a414d3024ef5b54de27b11499e12, entries=2, sequenceid=17, filesize=5.2 K 2024-11-16T12:49:52,848 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 141ms, sequenceid=17, compaction requested=false 2024-11-16T12:49:52,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T12:49:53,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:53,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:54,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53098 deadline: 1731761404002, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. is not online on 0450ab8807f5,33541,1731761368545 2024-11-16T12:49:54,003 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05., hostname=0450ab8807f5,33541,1731761368545, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05., hostname=0450ab8807f5,33541,1731761368545, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. 
is not online on 0450ab8807f5,33541,1731761368545 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T12:49:54,003 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05., hostname=0450ab8807f5,33541,1731761368545, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05. is not online on 0450ab8807f5,33541,1731761368545 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T12:49:54,003 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731761369689.56327ae7c4c61355f9927cc610edcd05., hostname=0450ab8807f5,33541,1731761368545, seqNum=2 from cache 2024-11-16T12:49:54,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:54,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:55,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:55,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:56,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:56,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:57,260 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,260 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,262 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,262 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,293 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,293 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,293 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,293 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,293 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,293 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:57,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:57,811 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T12:49:57,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:57,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T12:49:58,385 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T12:49:58,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:49:58,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:59,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:49:59,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:50:00,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:00,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:01,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:50:01,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:02,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:02,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:50:03,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:03,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:04,065 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068., hostname=0450ab8807f5,33541,1731761368545, seqNum=86] 2024-11-16T12:50:04,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:04,079 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T12:50:04,084 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/dae8c05829bd4be2b3fbe4d74c3afd14 is 1080, key is row0065/info:/1731761404067/Put/seqid=0 2024-11-16T12:50:04,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741853_1029 (size=12509) 2024-11-16T12:50:04,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741853_1029 (size=12509) 2024-11-16T12:50:04,089 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/dae8c05829bd4be2b3fbe4d74c3afd14 2024-11-16T12:50:04,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/dae8c05829bd4be2b3fbe4d74c3afd14 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/dae8c05829bd4be2b3fbe4d74c3afd14 2024-11-16T12:50:04,101 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/dae8c05829bd4be2b3fbe4d74c3afd14, entries=7, sequenceid=96, filesize=12.2 K 2024-11-16T12:50:04,102 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 753427f7c0fbf1885adf61308024b068 in 23ms, sequenceid=96, compaction requested=false 2024-11-16T12:50:04,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:04,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:04,102 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T12:50:04,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/a644592b64ea49c1a9bf69344526bd0f is 1080, key is row0072/info:/1731761404080/Put/seqid=0 2024-11-16T12:50:04,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741854_1030 (size=16817) 2024-11-16T12:50:04,122 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=110 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/a644592b64ea49c1a9bf69344526bd0f 2024-11-16T12:50:04,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741854_1030 (size=16817) 2024-11-16T12:50:04,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/a644592b64ea49c1a9bf69344526bd0f as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/a644592b64ea49c1a9bf69344526bd0f 2024-11-16T12:50:04,135 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/a644592b64ea49c1a9bf69344526bd0f, entries=11, sequenceid=110, filesize=16.4 K 2024-11-16T12:50:04,136 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=14.71 KB/15064 for 753427f7c0fbf1885adf61308024b068 in 34ms, sequenceid=110, compaction requested=true 2024-11-16T12:50:04,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:04,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 753427f7c0fbf1885adf61308024b068:info, priority=-2147483648, current under compaction store size is 1 
2024-11-16T12:50:04,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:50:04,136 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T12:50:04,137 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37586 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T12:50:04,137 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1541): 753427f7c0fbf1885adf61308024b068/info is initiating minor compaction (all files) 2024-11-16T12:50:04,138 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 753427f7c0fbf1885adf61308024b068/info in TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 2024-11-16T12:50:04,138 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f37eceb1f8094c66b2f787e2ea2f0939, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/dae8c05829bd4be2b3fbe4d74c3afd14, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/a644592b64ea49c1a9bf69344526bd0f] into tmpdir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp, totalSize=36.7 K 2024-11-16T12:50:04,138 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting f37eceb1f8094c66b2f787e2ea2f0939, keycount=3, bloomtype=ROW, size=8.1 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1731761391994 2024-11-16T12:50:04,138 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting dae8c05829bd4be2b3fbe4d74c3afd14, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1731761404067 2024-11-16T12:50:04,139 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting a644592b64ea49c1a9bf69344526bd0f, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1731761404080 2024-11-16T12:50:04,150 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 753427f7c0fbf1885adf61308024b068#info#compaction#69 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T12:50:04,150 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/67654f8c2fe2454b8468cef3967cc67e is 1080, key is row0062/info:/1731761391994/Put/seqid=0 2024-11-16T12:50:04,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741855_1031 (size=27778) 2024-11-16T12:50:04,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741855_1031 (size=27778) 2024-11-16T12:50:04,161 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/67654f8c2fe2454b8468cef3967cc67e as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/67654f8c2fe2454b8468cef3967cc67e 2024-11-16T12:50:04,167 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 753427f7c0fbf1885adf61308024b068/info of 753427f7c0fbf1885adf61308024b068 into 67654f8c2fe2454b8468cef3967cc67e(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T12:50:04,167 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:04,167 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068., storeName=753427f7c0fbf1885adf61308024b068/info, priority=13, startTime=1731761404136; duration=0sec 2024-11-16T12:50:04,167 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:50:04,167 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 753427f7c0fbf1885adf61308024b068:info 2024-11-16T12:50:04,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:04,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:05,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:05,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:06,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:06,138 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-16T12:50:06,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/1eb824ea41f34784b9ba218158a5ffbf is 1080, key is row0083/info:/1731761404104/Put/seqid=0 2024-11-16T12:50:06,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741856_1032 (size=21142) 2024-11-16T12:50:06,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741856_1032 (size=21142) 2024-11-16T12:50:06,151 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/1eb824ea41f34784b9ba218158a5ffbf 2024-11-16T12:50:06,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/1eb824ea41f34784b9ba218158a5ffbf as 
hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/1eb824ea41f34784b9ba218158a5ffbf 2024-11-16T12:50:06,163 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/1eb824ea41f34784b9ba218158a5ffbf, entries=15, sequenceid=129, filesize=20.6 K 2024-11-16T12:50:06,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for 753427f7c0fbf1885adf61308024b068 in 26ms, sequenceid=129, compaction requested=false 2024-11-16T12:50:06,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:06,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:06,165 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T12:50:06,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/28bbac39cde3424580b64f1d6537932e is 1080, key is row0098/info:/1731761406140/Put/seqid=0 2024-11-16T12:50:06,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741857_1033 (size=16828) 2024-11-16T12:50:06,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741857_1033 (size=16828) 2024-11-16T12:50:06,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=143 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/28bbac39cde3424580b64f1d6537932e 2024-11-16T12:50:06,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/28bbac39cde3424580b64f1d6537932e as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/28bbac39cde3424580b64f1d6537932e 2024-11-16T12:50:06,187 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/28bbac39cde3424580b64f1d6537932e, entries=11, sequenceid=143, filesize=16.4 K 2024-11-16T12:50:06,188 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for 753427f7c0fbf1885adf61308024b068 in 23ms, sequenceid=143, compaction requested=true 2024-11-16T12:50:06,188 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:06,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 753427f7c0fbf1885adf61308024b068:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T12:50:06,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:50:06,188 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T12:50:06,190 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 65748 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T12:50:06,190 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1541): 753427f7c0fbf1885adf61308024b068/info is initiating minor compaction (all files) 2024-11-16T12:50:06,190 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 753427f7c0fbf1885adf61308024b068/info in TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 2024-11-16T12:50:06,190 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/67654f8c2fe2454b8468cef3967cc67e, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/1eb824ea41f34784b9ba218158a5ffbf, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/28bbac39cde3424580b64f1d6537932e] into tmpdir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp, totalSize=64.2 K 2024-11-16T12:50:06,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:06,190 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T12:50:06,191 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 67654f8c2fe2454b8468cef3967cc67e, keycount=21, bloomtype=ROW, size=27.1 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1731761391994 2024-11-16T12:50:06,192 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1eb824ea41f34784b9ba218158a5ffbf, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1731761404104 2024-11-16T12:50:06,192 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 28bbac39cde3424580b64f1d6537932e, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1731761406140 2024-11-16T12:50:06,195 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/33dc616900634249b7dc4f574c53bfeb is 1080, key is row0109/info:/1731761406166/Put/seqid=0 2024-11-16T12:50:06,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741858_1034 (size=16828) 2024-11-16T12:50:06,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741858_1034 (size=16828) 2024-11-16T12:50:06,201 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/33dc616900634249b7dc4f574c53bfeb 2024-11-16T12:50:06,205 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 753427f7c0fbf1885adf61308024b068#info#compaction#73 average throughput is 24.11 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T12:50:06,206 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/b000d8996a1744b4b0149b84c3e3a125 is 1080, key is row0062/info:/1731761391994/Put/seqid=0 2024-11-16T12:50:06,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/33dc616900634249b7dc4f574c53bfeb as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/33dc616900634249b7dc4f574c53bfeb 2024-11-16T12:50:06,215 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/33dc616900634249b7dc4f574c53bfeb, entries=11, sequenceid=157, filesize=16.4 K 2024-11-16T12:50:06,216 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for 753427f7c0fbf1885adf61308024b068 in 26ms, sequenceid=157, compaction requested=false 2024-11-16T12:50:06,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:06,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741859_1035 (size=55934) 2024-11-16T12:50:06,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741859_1035 (size=55934) 2024-11-16T12:50:06,223 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/b000d8996a1744b4b0149b84c3e3a125 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/b000d8996a1744b4b0149b84c3e3a125 2024-11-16T12:50:06,229 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 753427f7c0fbf1885adf61308024b068/info of 753427f7c0fbf1885adf61308024b068 into b000d8996a1744b4b0149b84c3e3a125(size=54.6 K), total size for store is 71.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T12:50:06,229 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:06,229 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068., storeName=753427f7c0fbf1885adf61308024b068/info, priority=13, startTime=1731761406188; duration=0sec 2024-11-16T12:50:06,229 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:50:06,230 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 753427f7c0fbf1885adf61308024b068:info 2024-11-16T12:50:06,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:06,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:07,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:07,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:08,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:08,217 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-16T12:50:08,221 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/00e93218f90e4f59812df783174ab33c is 1080, key is row0120/info:/1731761406192/Put/seqid=0 2024-11-16T12:50:08,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741860_1036 (size=15750) 2024-11-16T12:50:08,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741860_1036 (size=15750) 2024-11-16T12:50:08,226 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/00e93218f90e4f59812df783174ab33c 2024-11-16T12:50:08,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/00e93218f90e4f59812df783174ab33c as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/00e93218f90e4f59812df783174ab33c 2024-11-16T12:50:08,239 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/00e93218f90e4f59812df783174ab33c, entries=10, sequenceid=171, filesize=15.4 K 2024-11-16T12:50:08,240 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=10.51 KB/10760 for 753427f7c0fbf1885adf61308024b068 in 23ms, sequenceid=171, compaction requested=true 2024-11-16T12:50:08,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:08,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 753427f7c0fbf1885adf61308024b068:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T12:50:08,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:50:08,240 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T12:50:08,242 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 88512 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T12:50:08,242 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1541): 753427f7c0fbf1885adf61308024b068/info is initiating minor compaction (all files) 2024-11-16T12:50:08,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:08,242 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 753427f7c0fbf1885adf61308024b068/info in TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 2024-11-16T12:50:08,242 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T12:50:08,242 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/b000d8996a1744b4b0149b84c3e3a125, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/33dc616900634249b7dc4f574c53bfeb, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/00e93218f90e4f59812df783174ab33c] into tmpdir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp, totalSize=86.4 K 2024-11-16T12:50:08,243 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting b000d8996a1744b4b0149b84c3e3a125, keycount=47, bloomtype=ROW, size=54.6 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1731761391994 2024-11-16T12:50:08,244 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 33dc616900634249b7dc4f574c53bfeb, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1731761406166 2024-11-16T12:50:08,244 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 00e93218f90e4f59812df783174ab33c, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1731761406192 2024-11-16T12:50:08,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/2ce408dba1c94ec69ad48be48a60d107 is 1080, key is row0130/info:/1731761408218/Put/seqid=0 2024-11-16T12:50:08,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is 
added to blk_1073741861_1037 (size=16828) 2024-11-16T12:50:08,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741861_1037 (size=16828) 2024-11-16T12:50:08,259 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/2ce408dba1c94ec69ad48be48a60d107 2024-11-16T12:50:08,280 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 753427f7c0fbf1885adf61308024b068#info#compaction#76 average throughput is 3.32 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T12:50:08,280 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/f993de03991041eca195498765ceeb72 is 1080, key is row0062/info:/1731761391994/Put/seqid=0 2024-11-16T12:50:08,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/2ce408dba1c94ec69ad48be48a60d107 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2ce408dba1c94ec69ad48be48a60d107 2024-11-16T12:50:08,299 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2ce408dba1c94ec69ad48be48a60d107, entries=11, sequenceid=185, filesize=16.4 K 2024-11-16T12:50:08,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=12.61 KB/12912 for 753427f7c0fbf1885adf61308024b068 in 58ms, sequenceid=185, compaction requested=false 2024-11-16T12:50:08,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:08,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:08,302 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-16T12:50:08,312 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/bc30cbe6401f4e5390788d8de5f016fa is 1080, key is row0141/info:/1731761408243/Put/seqid=0 2024-11-16T12:50:08,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741862_1038 (size=78811) 2024-11-16T12:50:08,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42515 is added to blk_1073741862_1038 (size=78811) 2024-11-16T12:50:08,324 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/f993de03991041eca195498765ceeb72 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f993de03991041eca195498765ceeb72 2024-11-16T12:50:08,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741863_1039 (size=20078) 2024-11-16T12:50:08,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741863_1039 (size=20078) 2024-11-16T12:50:08,327 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/bc30cbe6401f4e5390788d8de5f016fa 2024-11-16T12:50:08,337 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 753427f7c0fbf1885adf61308024b068/info of 753427f7c0fbf1885adf61308024b068 into f993de03991041eca195498765ceeb72(size=77.0 K), total size for store is 93.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T12:50:08,337 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:08,337 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068., storeName=753427f7c0fbf1885adf61308024b068/info, priority=13, startTime=1731761408240; duration=0sec 2024-11-16T12:50:08,337 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:50:08,337 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 753427f7c0fbf1885adf61308024b068:info 2024-11-16T12:50:08,340 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/bc30cbe6401f4e5390788d8de5f016fa as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/bc30cbe6401f4e5390788d8de5f016fa 2024-11-16T12:50:08,348 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/bc30cbe6401f4e5390788d8de5f016fa, entries=14, sequenceid=202, filesize=19.6 K 2024-11-16T12:50:08,349 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished 
flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=6.30 KB/6456 for 753427f7c0fbf1885adf61308024b068 in 47ms, sequenceid=202, compaction requested=true 2024-11-16T12:50:08,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:08,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 753427f7c0fbf1885adf61308024b068:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T12:50:08,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:50:08,349 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T12:50:08,350 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 115717 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T12:50:08,350 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1541): 753427f7c0fbf1885adf61308024b068/info is initiating minor compaction (all files) 2024-11-16T12:50:08,351 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 753427f7c0fbf1885adf61308024b068/info in TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 2024-11-16T12:50:08,351 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f993de03991041eca195498765ceeb72, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2ce408dba1c94ec69ad48be48a60d107, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/bc30cbe6401f4e5390788d8de5f016fa] into tmpdir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp, totalSize=113.0 K 2024-11-16T12:50:08,351 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting f993de03991041eca195498765ceeb72, keycount=68, bloomtype=ROW, size=77.0 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1731761391994 2024-11-16T12:50:08,351 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2ce408dba1c94ec69ad48be48a60d107, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1731761408218 2024-11-16T12:50:08,352 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting bc30cbe6401f4e5390788d8de5f016fa, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1731761408243 2024-11-16T12:50:08,367 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 753427f7c0fbf1885adf61308024b068#info#compaction#78 average 
throughput is 31.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T12:50:08,367 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/32e4a8c7ec454fdaaa2c11ad85761baf is 1080, key is row0062/info:/1731761391994/Put/seqid=0 2024-11-16T12:50:08,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741864_1040 (size=105871) 2024-11-16T12:50:08,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741864_1040 (size=105871) 2024-11-16T12:50:08,404 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/32e4a8c7ec454fdaaa2c11ad85761baf as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/32e4a8c7ec454fdaaa2c11ad85761baf 2024-11-16T12:50:08,413 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 753427f7c0fbf1885adf61308024b068/info of 753427f7c0fbf1885adf61308024b068 into 32e4a8c7ec454fdaaa2c11ad85761baf(size=103.4 K), total size for store is 103.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T12:50:08,413 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:08,413 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068., storeName=753427f7c0fbf1885adf61308024b068/info, priority=13, startTime=1731761408349; duration=0sec 2024-11-16T12:50:08,413 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:50:08,413 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 753427f7c0fbf1885adf61308024b068:info 2024-11-16T12:50:08,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:08,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:09,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:09,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:09,940 INFO [master/0450ab8807f5:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T12:50:09,940 INFO [master/0450ab8807f5:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
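[Editor's note] The repeated WARN entries above come from the Close-WAL-Writer thread polling whether the old WAL file has been closed on the other mini-cluster; because that cluster's DFSClient has already been shut down, every probe throws "Filesystem closed" and the loop retries about once per second. A minimal sketch of that polling pattern, written against the public DistributedFileSystem API rather than the reflection RecoverLeaseFSUtils actually uses (the class name, timeout handling, and one-second pause are illustrative assumptions):

    // Sketch only: approximates the recover-lease polling visible in the log above.
    // The real code lives in org.apache.hadoop.hbase.util.RecoverLeaseFSUtils and
    // calls isFileClosed reflectively.
    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    final class LeaseRecoveryPollSketch {
      static boolean recoverLeaseWithPolling(DistributedFileSystem dfs, Path wal,
          long timeoutMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          try {
            if (dfs.recoverLease(wal)) {   // ask the NameNode to start/confirm lease recovery
              return true;                 // lease recovered; writer can be closed
            }
            if (dfs.isFileClosed(wal)) {   // the probe that fails in the log above
              return true;
            }
          } catch (IOException e) {
            // "Filesystem closed" means the DFSClient behind this handle was already
            // shut down; the WARN in the log is this exception being reported before retrying.
          }
          Thread.sleep(1000L);             // the log shows roughly one probe per second
        }
        return false;
      }
    }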
2024-11-16T12:50:10,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:10,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T12:50:10,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/2b12a3ee354a44329cf1e518c3d3869f is 1080, key is row0155/info:/1731761408304/Put/seqid=0 2024-11-16T12:50:10,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741865_1041 (size=12516) 2024-11-16T12:50:10,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741865_1041 (size=12516) 2024-11-16T12:50:10,336 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/2b12a3ee354a44329cf1e518c3d3869f 2024-11-16T12:50:10,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/2b12a3ee354a44329cf1e518c3d3869f as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2b12a3ee354a44329cf1e518c3d3869f 2024-11-16T12:50:10,345 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2b12a3ee354a44329cf1e518c3d3869f, entries=7, sequenceid=214, filesize=12.2 K 2024-11-16T12:50:10,346 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 753427f7c0fbf1885adf61308024b068 in 26ms, sequenceid=214, compaction requested=false 2024-11-16T12:50:10,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:10,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:10,348 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-16T12:50:10,352 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/04f7f71d9f7a4d16a384b6222e878cf4 is 1080, key is row0162/info:/1731761410322/Put/seqid=0 2024-11-16T12:50:10,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to 
blk_1073741866_1042 (size=19000) 2024-11-16T12:50:10,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741866_1042 (size=19000) 2024-11-16T12:50:10,360 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/04f7f71d9f7a4d16a384b6222e878cf4 2024-11-16T12:50:10,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/04f7f71d9f7a4d16a384b6222e878cf4 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/04f7f71d9f7a4d16a384b6222e878cf4 2024-11-16T12:50:10,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/04f7f71d9f7a4d16a384b6222e878cf4, entries=13, sequenceid=230, filesize=18.6 K 2024-11-16T12:50:10,372 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for 753427f7c0fbf1885adf61308024b068 in 24ms, sequenceid=230, compaction requested=true 2024-11-16T12:50:10,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:10,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 753427f7c0fbf1885adf61308024b068:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T12:50:10,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:50:10,372 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T12:50:10,374 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 137387 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T12:50:10,374 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1541): 753427f7c0fbf1885adf61308024b068/info is initiating minor compaction (all files) 2024-11-16T12:50:10,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:10,374 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 753427f7c0fbf1885adf61308024b068/info in TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 
2024-11-16T12:50:10,374 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-16T12:50:10,374 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/32e4a8c7ec454fdaaa2c11ad85761baf, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2b12a3ee354a44329cf1e518c3d3869f, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/04f7f71d9f7a4d16a384b6222e878cf4] into tmpdir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp, totalSize=134.2 K 2024-11-16T12:50:10,374 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 32e4a8c7ec454fdaaa2c11ad85761baf, keycount=93, bloomtype=ROW, size=103.4 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1731761391994 2024-11-16T12:50:10,375 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2b12a3ee354a44329cf1e518c3d3869f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731761408304 2024-11-16T12:50:10,375 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 04f7f71d9f7a4d16a384b6222e878cf4, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1731761410322 2024-11-16T12:50:10,378 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/dbb9a579c4f5471ab312d991c52e088f is 1080, key is row0175/info:/1731761410349/Put/seqid=0 2024-11-16T12:50:10,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741867_1043 (size=19000) 2024-11-16T12:50:10,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741867_1043 (size=19000) 2024-11-16T12:50:10,387 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 753427f7c0fbf1885adf61308024b068#info#compaction#82 average throughput is 57.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T12:50:10,388 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/cc66912f878d4515aee2e77b85f97c5b is 1080, key is row0062/info:/1731761391994/Put/seqid=0 2024-11-16T12:50:10,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741868_1044 (size=127665) 2024-11-16T12:50:10,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741868_1044 (size=127665) 2024-11-16T12:50:10,399 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/cc66912f878d4515aee2e77b85f97c5b as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/cc66912f878d4515aee2e77b85f97c5b 2024-11-16T12:50:10,406 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 753427f7c0fbf1885adf61308024b068/info of 753427f7c0fbf1885adf61308024b068 into cc66912f878d4515aee2e77b85f97c5b(size=124.7 K), total size for store is 124.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T12:50:10,406 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:10,406 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068., storeName=753427f7c0fbf1885adf61308024b068/info, priority=13, startTime=1731761410372; duration=0sec 2024-11-16T12:50:10,406 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:50:10,406 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 753427f7c0fbf1885adf61308024b068:info 2024-11-16T12:50:10,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:10,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:10,785 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/dbb9a579c4f5471ab312d991c52e088f 2024-11-16T12:50:10,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/dbb9a579c4f5471ab312d991c52e088f as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/dbb9a579c4f5471ab312d991c52e088f 2024-11-16T12:50:10,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/dbb9a579c4f5471ab312d991c52e088f, entries=13, sequenceid=246, filesize=18.6 K 2024-11-16T12:50:10,806 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=5.25 KB/5380 for 753427f7c0fbf1885adf61308024b068 in 432ms, sequenceid=246, compaction requested=false 2024-11-16T12:50:10,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:11,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:11,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:50:12,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:12,388 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T12:50:12,392 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/2b59ba78315a4eb98370093c17176336 is 1080, key is row0188/info:/1731761410375/Put/seqid=0 2024-11-16T12:50:12,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741869_1045 (size=12517) 2024-11-16T12:50:12,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741869_1045 (size=12517) 2024-11-16T12:50:12,405 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/2b59ba78315a4eb98370093c17176336 2024-11-16T12:50:12,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/2b59ba78315a4eb98370093c17176336 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2b59ba78315a4eb98370093c17176336 2024-11-16T12:50:12,417 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2b59ba78315a4eb98370093c17176336, entries=7, sequenceid=257, filesize=12.2 K 2024-11-16T12:50:12,418 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 753427f7c0fbf1885adf61308024b068 in 30ms, sequenceid=257, compaction requested=true 2024-11-16T12:50:12,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:12,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 753427f7c0fbf1885adf61308024b068:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T12:50:12,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:50:12,419 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T12:50:12,420 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 159182 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-16T12:50:12,420 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1541): 753427f7c0fbf1885adf61308024b068/info is initiating minor compaction (all files) 2024-11-16T12:50:12,420 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 753427f7c0fbf1885adf61308024b068/info in TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 2024-11-16T12:50:12,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:12,420 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-16T12:50:12,420 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/cc66912f878d4515aee2e77b85f97c5b, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/dbb9a579c4f5471ab312d991c52e088f, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2b59ba78315a4eb98370093c17176336] into tmpdir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp, totalSize=155.5 K 2024-11-16T12:50:12,420 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting cc66912f878d4515aee2e77b85f97c5b, keycount=113, bloomtype=ROW, size=124.7 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1731761391994 2024-11-16T12:50:12,421 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting dbb9a579c4f5471ab312d991c52e088f, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1731761410349 2024-11-16T12:50:12,421 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2b59ba78315a4eb98370093c17176336, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1731761410375 2024-11-16T12:50:12,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/8d1c9d61ec82470a87c66111e02e6bf8 is 1080, key is row0195/info:/1731761412389/Put/seqid=0 2024-11-16T12:50:12,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:12,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741870_1046 (size=20092) 2024-11-16T12:50:12,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741870_1046 (size=20092) 2024-11-16T12:50:12,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/8d1c9d61ec82470a87c66111e02e6bf8 2024-11-16T12:50:12,434 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 753427f7c0fbf1885adf61308024b068#info#compaction#85 average throughput is 45.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T12:50:12,435 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/f3d5d46668ef4b00a824ea5ba93db69d is 1080, key is row0062/info:/1731761391994/Put/seqid=0 2024-11-16T12:50:12,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/8d1c9d61ec82470a87c66111e02e6bf8 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/8d1c9d61ec82470a87c66111e02e6bf8 2024-11-16T12:50:12,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741871_1047 (size=149401) 2024-11-16T12:50:12,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741871_1047 (size=149401) 2024-11-16T12:50:12,448 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/8d1c9d61ec82470a87c66111e02e6bf8, entries=14, sequenceid=274, filesize=19.6 K 2024-11-16T12:50:12,449 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=10.51 KB/10760 for 753427f7c0fbf1885adf61308024b068 in 29ms, sequenceid=274, compaction requested=false 2024-11-16T12:50:12,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:12,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:12,449 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T12:50:12,453 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/e7c6ae34920844989ac5d01b128757dd is 1080, key is row0209/info:/1731761412421/Put/seqid=0 2024-11-16T12:50:12,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741872_1048 (size=16839) 2024-11-16T12:50:12,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741872_1048 (size=16839) 2024-11-16T12:50:12,465 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/e7c6ae34920844989ac5d01b128757dd 2024-11-16T12:50:12,470 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/e7c6ae34920844989ac5d01b128757dd as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/e7c6ae34920844989ac5d01b128757dd 2024-11-16T12:50:12,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/e7c6ae34920844989ac5d01b128757dd, entries=11, sequenceid=288, filesize=16.4 K 2024-11-16T12:50:12,476 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=5.25 KB/5380 for 753427f7c0fbf1885adf61308024b068 in 27ms, sequenceid=288, compaction requested=false 2024-11-16T12:50:12,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:12,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:50:12,850 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/f3d5d46668ef4b00a824ea5ba93db69d as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f3d5d46668ef4b00a824ea5ba93db69d 2024-11-16T12:50:12,856 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 753427f7c0fbf1885adf61308024b068/info of 753427f7c0fbf1885adf61308024b068 into f3d5d46668ef4b00a824ea5ba93db69d(size=145.9 K), total size for store is 182.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T12:50:12,856 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:12,856 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068., storeName=753427f7c0fbf1885adf61308024b068/info, priority=13, startTime=1731761412418; duration=0sec 2024-11-16T12:50:12,857 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:50:12,857 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 753427f7c0fbf1885adf61308024b068:info 2024-11-16T12:50:13,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:13,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:14,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:14,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:14,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T12:50:14,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/7bb0efddddda4914a99e6a90793eed42 is 1080, key is row0220/info:/1731761412450/Put/seqid=0 2024-11-16T12:50:14,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741873_1049 (size=12523) 2024-11-16T12:50:14,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741873_1049 (size=12523) 2024-11-16T12:50:14,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/7bb0efddddda4914a99e6a90793eed42 2024-11-16T12:50:14,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/7bb0efddddda4914a99e6a90793eed42 as 
hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/7bb0efddddda4914a99e6a90793eed42 2024-11-16T12:50:14,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/7bb0efddddda4914a99e6a90793eed42, entries=7, sequenceid=299, filesize=12.2 K 2024-11-16T12:50:14,486 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 753427f7c0fbf1885adf61308024b068 in 21ms, sequenceid=299, compaction requested=true 2024-11-16T12:50:14,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:14,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 753427f7c0fbf1885adf61308024b068:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T12:50:14,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:50:14,487 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-16T12:50:14,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:14,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T12:50:14,488 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 198855 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-16T12:50:14,488 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1541): 753427f7c0fbf1885adf61308024b068/info is initiating minor compaction (all files) 2024-11-16T12:50:14,488 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 753427f7c0fbf1885adf61308024b068/info in TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 
2024-11-16T12:50:14,489 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f3d5d46668ef4b00a824ea5ba93db69d, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/8d1c9d61ec82470a87c66111e02e6bf8, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/e7c6ae34920844989ac5d01b128757dd, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/7bb0efddddda4914a99e6a90793eed42] into tmpdir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp, totalSize=194.2 K 2024-11-16T12:50:14,489 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting f3d5d46668ef4b00a824ea5ba93db69d, keycount=133, bloomtype=ROW, size=145.9 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1731761391994 2024-11-16T12:50:14,489 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8d1c9d61ec82470a87c66111e02e6bf8, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1731761412389 2024-11-16T12:50:14,490 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting e7c6ae34920844989ac5d01b128757dd, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731761412421 2024-11-16T12:50:14,490 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7bb0efddddda4914a99e6a90793eed42, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1731761412450 2024-11-16T12:50:14,491 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/f5a0c50dd3f54aa1b939275d5600a8af is 1080, key is row0227/info:/1731761414466/Put/seqid=0 2024-11-16T12:50:14,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741874_1050 (size=17918) 2024-11-16T12:50:14,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741874_1050 (size=17918) 2024-11-16T12:50:14,505 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/f5a0c50dd3f54aa1b939275d5600a8af 2024-11-16T12:50:14,507 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 753427f7c0fbf1885adf61308024b068#info#compaction#89 average throughput is 42.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T12:50:14,508 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/18d89bed7ff340c3b8d8a445de7f735b is 1080, key is row0062/info:/1731761391994/Put/seqid=0 2024-11-16T12:50:14,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741875_1051 (size=184073) 2024-11-16T12:50:14,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741875_1051 (size=184073) 2024-11-16T12:50:14,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/f5a0c50dd3f54aa1b939275d5600a8af as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f5a0c50dd3f54aa1b939275d5600a8af 2024-11-16T12:50:14,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f5a0c50dd3f54aa1b939275d5600a8af, entries=12, sequenceid=314, filesize=17.5 K 2024-11-16T12:50:14,518 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/18d89bed7ff340c3b8d8a445de7f735b as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/18d89bed7ff340c3b8d8a445de7f735b 2024-11-16T12:50:14,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=14.71 KB/15064 for 753427f7c0fbf1885adf61308024b068 in 31ms, sequenceid=314, compaction requested=false 2024-11-16T12:50:14,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:14,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] regionserver.HRegion(8855): Flush requested on 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:14,520 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-16T12:50:14,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/4209a8e5da4d4177b5b4fcf51e4b1084 is 1080, key is row0239/info:/1731761414488/Put/seqid=0 2024-11-16T12:50:14,525 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 753427f7c0fbf1885adf61308024b068/info of 753427f7c0fbf1885adf61308024b068 into 
18d89bed7ff340c3b8d8a445de7f735b(size=179.8 K), total size for store is 197.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T12:50:14,526 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:14,526 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068., storeName=753427f7c0fbf1885adf61308024b068/info, priority=12, startTime=1731761414486; duration=0sec 2024-11-16T12:50:14,526 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:50:14,526 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 753427f7c0fbf1885adf61308024b068:info 2024-11-16T12:50:14,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741876_1052 (size=21171) 2024-11-16T12:50:14,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741876_1052 (size=21171) 2024-11-16T12:50:14,530 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/4209a8e5da4d4177b5b4fcf51e4b1084 2024-11-16T12:50:14,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/4209a8e5da4d4177b5b4fcf51e4b1084 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/4209a8e5da4d4177b5b4fcf51e4b1084 2024-11-16T12:50:14,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/4209a8e5da4d4177b5b4fcf51e4b1084, entries=15, sequenceid=332, filesize=20.7 K 2024-11-16T12:50:14,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=3.15 KB/3228 for 753427f7c0fbf1885adf61308024b068 in 20ms, sequenceid=332, compaction requested=true 2024-11-16T12:50:14,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:14,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 753427f7c0fbf1885adf61308024b068:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T12:50:14,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:50:14,540 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T12:50:14,541 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 223162 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T12:50:14,541 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1541): 753427f7c0fbf1885adf61308024b068/info is initiating minor compaction (all files) 2024-11-16T12:50:14,541 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 753427f7c0fbf1885adf61308024b068/info in TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 2024-11-16T12:50:14,541 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/18d89bed7ff340c3b8d8a445de7f735b, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f5a0c50dd3f54aa1b939275d5600a8af, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/4209a8e5da4d4177b5b4fcf51e4b1084] into tmpdir=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp, totalSize=217.9 K 2024-11-16T12:50:14,542 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 18d89bed7ff340c3b8d8a445de7f735b, keycount=165, bloomtype=ROW, size=179.8 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1731761391994 2024-11-16T12:50:14,542 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting f5a0c50dd3f54aa1b939275d5600a8af, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1731761414466 2024-11-16T12:50:14,542 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4209a8e5da4d4177b5b4fcf51e4b1084, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1731761414488 2024-11-16T12:50:14,552 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-16T12:50:14,556 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 753427f7c0fbf1885adf61308024b068#info#compaction#91 average throughput is 39.40 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T12:50:14,556 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/902dc04ea3944224b81b21d9fbb0d67d is 1080, key is row0062/info:/1731761391994/Put/seqid=0 2024-11-16T12:50:14,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741877_1053 (size=213385) 2024-11-16T12:50:14,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741877_1053 (size=213385) 2024-11-16T12:50:14,565 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/902dc04ea3944224b81b21d9fbb0d67d as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/902dc04ea3944224b81b21d9fbb0d67d 2024-11-16T12:50:14,571 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 753427f7c0fbf1885adf61308024b068/info of 753427f7c0fbf1885adf61308024b068 into 902dc04ea3944224b81b21d9fbb0d67d(size=208.4 K), total size for store is 208.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T12:50:14,572 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:14,572 INFO [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068., storeName=753427f7c0fbf1885adf61308024b068/info, priority=13, startTime=1731761414540; duration=0sec 2024-11-16T12:50:14,572 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T12:50:14,572 DEBUG [RS:0;0450ab8807f5:33541-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 753427f7c0fbf1885adf61308024b068:info 2024-11-16T12:50:14,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:15,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:15,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:16,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:16,527 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-16T12:50:16,527 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33541%2C1731761368545.1731761416527 2024-11-16T12:50:16,533 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,533 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,533 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,533 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,533 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,533 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/WALs/0450ab8807f5,33541,1731761368545/0450ab8807f5%2C33541%2C1731761368545.1731761369062 with entries=316, filesize=309.52 KB; new WAL /user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/WALs/0450ab8807f5,33541,1731761368545/0450ab8807f5%2C33541%2C1731761368545.1731761416527 2024-11-16T12:50:16,534 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34389:34389),(127.0.0.1/127.0.0.1:33791:33791)] 2024-11-16T12:50:16,534 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/WALs/0450ab8807f5,33541,1731761368545/0450ab8807f5%2C33541%2C1731761368545.1731761369062 is not closed yet, will try archiving it next time 2024-11-16T12:50:16,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741833_1009 (size=316959) 2024-11-16T12:50:16,535 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741833_1009 (size=316959) 2024-11-16T12:50:16,538 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 753427f7c0fbf1885adf61308024b068 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-16T12:50:16,542 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/6d47b361cbba4ad4996475fadfc5c006 is 1080, key is row0254/info:/1731761414521/Put/seqid=0 2024-11-16T12:50:16,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741879_1055 (size=8199) 2024-11-16T12:50:16,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741879_1055 (size=8199) 2024-11-16T12:50:16,551 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/6d47b361cbba4ad4996475fadfc5c006 2024-11-16T12:50:16,556 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/.tmp/info/6d47b361cbba4ad4996475fadfc5c006 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/6d47b361cbba4ad4996475fadfc5c006 2024-11-16T12:50:16,561 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/6d47b361cbba4ad4996475fadfc5c006, entries=3, sequenceid=340, filesize=8.0 K 2024-11-16T12:50:16,562 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 753427f7c0fbf1885adf61308024b068 in 23ms, sequenceid=340, compaction requested=false 2024-11-16T12:50:16,562 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 753427f7c0fbf1885adf61308024b068: 2024-11-16T12:50:16,562 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-16T12:50:16,566 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/.tmp/info/ddf1928433a44c38b47c39d26dc7f05e is 186, key is TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72./info:regioninfo/1731761392750/Put/seqid=0 2024-11-16T12:50:16,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741880_1056 (size=6153) 2024-11-16T12:50:16,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741880_1056 (size=6153) 2024-11-16T12:50:16,573 INFO 
[Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/.tmp/info/ddf1928433a44c38b47c39d26dc7f05e 2024-11-16T12:50:16,578 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/.tmp/info/ddf1928433a44c38b47c39d26dc7f05e as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/info/ddf1928433a44c38b47c39d26dc7f05e 2024-11-16T12:50:16,582 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/info/ddf1928433a44c38b47c39d26dc7f05e, entries=5, sequenceid=21, filesize=6.0 K 2024-11-16T12:50:16,583 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 21ms, sequenceid=21, compaction requested=false 2024-11-16T12:50:16,583 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T12:50:16,583 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for e8eaec5ba20e3ee40e68e1509c73db72: 2024-11-16T12:50:16,583 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C33541%2C1731761368545.1731761416583 2024-11-16T12:50:16,592 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,593 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,593 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,593 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,593 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,593 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/WALs/0450ab8807f5,33541,1731761368545/0450ab8807f5%2C33541%2C1731761368545.1731761416527 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/WALs/0450ab8807f5,33541,1731761368545/0450ab8807f5%2C33541%2C1731761368545.1731761416583 2024-11-16T12:50:16,594 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33791:33791),(127.0.0.1/127.0.0.1:34389:34389)] 2024-11-16T12:50:16,594 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/WALs/0450ab8807f5,33541,1731761368545/0450ab8807f5%2C33541%2C1731761368545.1731761416527 is not closed yet, will try archiving it next time 2024-11-16T12:50:16,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741878_1054 (size=731) 2024-11-16T12:50:16,594 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/WALs/0450ab8807f5,33541,1731761368545/0450ab8807f5%2C33541%2C1731761368545.1731761369062 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/oldWALs/0450ab8807f5%2C33541%2C1731761368545.1731761369062 2024-11-16T12:50:16,595 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741878_1054 (size=731) 2024-11-16T12:50:16,595 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T12:50:16,595 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/WALs/0450ab8807f5,33541,1731761368545/0450ab8807f5%2C33541%2C1731761368545.1731761416527 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/oldWALs/0450ab8807f5%2C33541%2C1731761368545.1731761416527 2024-11-16T12:50:16,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:16,695 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T12:50:16,695 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T12:50:16,695 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:50:16,695 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:50:16,695 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:50:16,695 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
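The cluster-shutdown entries that follow are driven by the test's JUnit teardown visible in the call stacks above (AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster). As a minimal illustrative sketch only, and not the test's actual source: only the class name org.apache.hadoop.hbase.HBaseTestingUtil and its shutdownMiniCluster method are confirmed by the stack trace; the no-arg constructor, startMiniCluster call, and everything else below are assumptions.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class LogRollingTeardownSketch {
      // Test harness that owns the single-process ("mini") HBase cluster used by the test.
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Starts the in-process HDFS, ZooKeeper, master and region server (assumed method).
        testUtil.startMiniCluster();
      }

      @After
      public void tearDown() throws Exception {
        // Closes the shared async cluster connection and stops the master and region
        // servers; this is the call that produces the "Connection has been closed",
        // "Shutting down HBase Cluster" and "STOPPING region server" entries in this log.
        testUtil.shutdownMiniCluster();
      }
    }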
2024-11-16T12:50:16,695 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T12:50:16,695 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=495321796, stopped=false 2024-11-16T12:50:16,696 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0450ab8807f5,46653,1731761368406 2024-11-16T12:50:16,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T12:50:16,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T12:50:16,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:16,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:16,741 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T12:50:16,741 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T12:50:16,741 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:50:16,741 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:50:16,742 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0450ab8807f5,33541,1731761368545' ***** 2024-11-16T12:50:16,742 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T12:50:16,742 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T12:50:16,742 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:50:16,742 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:50:16,742 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T12:50:16,742 INFO [RS:0;0450ab8807f5:33541 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T12:50:16,743 INFO [RS:0;0450ab8807f5:33541 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-16T12:50:16,743 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer(3091): Received CLOSE for 753427f7c0fbf1885adf61308024b068 2024-11-16T12:50:16,743 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer(3091): Received CLOSE for e8eaec5ba20e3ee40e68e1509c73db72 2024-11-16T12:50:16,743 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer(959): stopping server 0450ab8807f5,33541,1731761368545 2024-11-16T12:50:16,743 INFO [RS:0;0450ab8807f5:33541 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T12:50:16,743 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 753427f7c0fbf1885adf61308024b068, disabling compactions & flushes 2024-11-16T12:50:16,743 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 2024-11-16T12:50:16,743 INFO [RS:0;0450ab8807f5:33541 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0450ab8807f5:33541. 2024-11-16T12:50:16,743 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 2024-11-16T12:50:16,743 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. after waiting 0 ms 2024-11-16T12:50:16,743 DEBUG [RS:0;0450ab8807f5:33541 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:50:16,743 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 
2024-11-16T12:50:16,743 DEBUG [RS:0;0450ab8807f5:33541 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:50:16,744 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T12:50:16,744 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T12:50:16,744 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T12:50:16,744 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T12:50:16,744 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-16T12:50:16,744 DEBUG [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer(1325): Online Regions={753427f7c0fbf1885adf61308024b068=TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068., 1588230740=hbase:meta,,1.1588230740, e8eaec5ba20e3ee40e68e1509c73db72=TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72.} 2024-11-16T12:50:16,744 DEBUG [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 753427f7c0fbf1885adf61308024b068, e8eaec5ba20e3ee40e68e1509c73db72 2024-11-16T12:50:16,744 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/1df159e6bc5249a097db4245ac5b09a0.56327ae7c4c61355f9927cc610edcd05->hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/1df159e6bc5249a097db4245ac5b09a0-top, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f37eceb1f8094c66b2f787e2ea2f0939, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/dae8c05829bd4be2b3fbe4d74c3afd14, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/67654f8c2fe2454b8468cef3967cc67e, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/a644592b64ea49c1a9bf69344526bd0f, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/1eb824ea41f34784b9ba218158a5ffbf, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/b000d8996a1744b4b0149b84c3e3a125, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/28bbac39cde3424580b64f1d6537932e, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/33dc616900634249b7dc4f574c53bfeb, 
hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f993de03991041eca195498765ceeb72, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/00e93218f90e4f59812df783174ab33c, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2ce408dba1c94ec69ad48be48a60d107, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/32e4a8c7ec454fdaaa2c11ad85761baf, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/bc30cbe6401f4e5390788d8de5f016fa, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2b12a3ee354a44329cf1e518c3d3869f, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/cc66912f878d4515aee2e77b85f97c5b, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/04f7f71d9f7a4d16a384b6222e878cf4, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/dbb9a579c4f5471ab312d991c52e088f, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f3d5d46668ef4b00a824ea5ba93db69d, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2b59ba78315a4eb98370093c17176336, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/8d1c9d61ec82470a87c66111e02e6bf8, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/e7c6ae34920844989ac5d01b128757dd, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/18d89bed7ff340c3b8d8a445de7f735b, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/7bb0efddddda4914a99e6a90793eed42, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f5a0c50dd3f54aa1b939275d5600a8af, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/4209a8e5da4d4177b5b4fcf51e4b1084] to archive 2024-11-16T12:50:16,744 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & 
flushes 2024-11-16T12:50:16,744 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T12:50:16,745 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T12:50:16,745 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T12:50:16,745 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T12:50:16,745 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-16T12:50:16,747 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/1df159e6bc5249a097db4245ac5b09a0.56327ae7c4c61355f9927cc610edcd05 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/1df159e6bc5249a097db4245ac5b09a0.56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:50:16,749 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f37eceb1f8094c66b2f787e2ea2f0939 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f37eceb1f8094c66b2f787e2ea2f0939 2024-11-16T12:50:16,750 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-16T12:50:16,750 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/dae8c05829bd4be2b3fbe4d74c3afd14 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/dae8c05829bd4be2b3fbe4d74c3afd14 2024-11-16T12:50:16,750 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T12:50:16,750 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T12:50:16,750 DEBUG 
[RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731761416744Running coprocessor pre-close hooks at 1731761416744Disabling compacts and flushes for region at 1731761416744Disabling writes for close at 1731761416745 (+1 ms)Writing region close event to WAL at 1731761416746 (+1 ms)Running coprocessor post-close hooks at 1731761416750 (+4 ms)Closed at 1731761416750 2024-11-16T12:50:16,751 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T12:50:16,751 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/67654f8c2fe2454b8468cef3967cc67e to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/67654f8c2fe2454b8468cef3967cc67e 2024-11-16T12:50:16,752 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/a644592b64ea49c1a9bf69344526bd0f to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/a644592b64ea49c1a9bf69344526bd0f 2024-11-16T12:50:16,754 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/1eb824ea41f34784b9ba218158a5ffbf to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/1eb824ea41f34784b9ba218158a5ffbf 2024-11-16T12:50:16,755 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/b000d8996a1744b4b0149b84c3e3a125 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/b000d8996a1744b4b0149b84c3e3a125 2024-11-16T12:50:16,756 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/28bbac39cde3424580b64f1d6537932e to 
hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/28bbac39cde3424580b64f1d6537932e 2024-11-16T12:50:16,758 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/33dc616900634249b7dc4f574c53bfeb to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/33dc616900634249b7dc4f574c53bfeb 2024-11-16T12:50:16,759 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f993de03991041eca195498765ceeb72 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f993de03991041eca195498765ceeb72 2024-11-16T12:50:16,761 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/00e93218f90e4f59812df783174ab33c to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/00e93218f90e4f59812df783174ab33c 2024-11-16T12:50:16,762 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2ce408dba1c94ec69ad48be48a60d107 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2ce408dba1c94ec69ad48be48a60d107 2024-11-16T12:50:16,763 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/32e4a8c7ec454fdaaa2c11ad85761baf to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/32e4a8c7ec454fdaaa2c11ad85761baf 2024-11-16T12:50:16,765 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/bc30cbe6401f4e5390788d8de5f016fa to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/bc30cbe6401f4e5390788d8de5f016fa 2024-11-16T12:50:16,766 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2b12a3ee354a44329cf1e518c3d3869f to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2b12a3ee354a44329cf1e518c3d3869f 2024-11-16T12:50:16,767 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/cc66912f878d4515aee2e77b85f97c5b to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/cc66912f878d4515aee2e77b85f97c5b 2024-11-16T12:50:16,768 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/04f7f71d9f7a4d16a384b6222e878cf4 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/04f7f71d9f7a4d16a384b6222e878cf4 2024-11-16T12:50:16,769 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/dbb9a579c4f5471ab312d991c52e088f to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/dbb9a579c4f5471ab312d991c52e088f 2024-11-16T12:50:16,770 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f3d5d46668ef4b00a824ea5ba93db69d to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f3d5d46668ef4b00a824ea5ba93db69d 2024-11-16T12:50:16,771 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2b59ba78315a4eb98370093c17176336 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/2b59ba78315a4eb98370093c17176336 2024-11-16T12:50:16,772 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/8d1c9d61ec82470a87c66111e02e6bf8 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/8d1c9d61ec82470a87c66111e02e6bf8 2024-11-16T12:50:16,773 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/e7c6ae34920844989ac5d01b128757dd to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/e7c6ae34920844989ac5d01b128757dd 2024-11-16T12:50:16,774 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/18d89bed7ff340c3b8d8a445de7f735b to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/18d89bed7ff340c3b8d8a445de7f735b 2024-11-16T12:50:16,774 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/7bb0efddddda4914a99e6a90793eed42 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/7bb0efddddda4914a99e6a90793eed42 2024-11-16T12:50:16,775 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f5a0c50dd3f54aa1b939275d5600a8af to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/f5a0c50dd3f54aa1b939275d5600a8af 2024-11-16T12:50:16,776 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/4209a8e5da4d4177b5b4fcf51e4b1084 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/info/4209a8e5da4d4177b5b4fcf51e4b1084 2024-11-16T12:50:16,777 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=0450ab8807f5:46653 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-16T12:50:16,777 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [f37eceb1f8094c66b2f787e2ea2f0939=8260, dae8c05829bd4be2b3fbe4d74c3afd14=12509, 67654f8c2fe2454b8468cef3967cc67e=27778, a644592b64ea49c1a9bf69344526bd0f=16817, 1eb824ea41f34784b9ba218158a5ffbf=21142, b000d8996a1744b4b0149b84c3e3a125=55934, 28bbac39cde3424580b64f1d6537932e=16828, 33dc616900634249b7dc4f574c53bfeb=16828, f993de03991041eca195498765ceeb72=78811, 00e93218f90e4f59812df783174ab33c=15750, 2ce408dba1c94ec69ad48be48a60d107=16828, 32e4a8c7ec454fdaaa2c11ad85761baf=105871, bc30cbe6401f4e5390788d8de5f016fa=20078, 2b12a3ee354a44329cf1e518c3d3869f=12516, cc66912f878d4515aee2e77b85f97c5b=127665, 04f7f71d9f7a4d16a384b6222e878cf4=19000, dbb9a579c4f5471ab312d991c52e088f=19000, f3d5d46668ef4b00a824ea5ba93db69d=149401, 2b59ba78315a4eb98370093c17176336=12517, 8d1c9d61ec82470a87c66111e02e6bf8=20092, e7c6ae34920844989ac5d01b128757dd=16839, 18d89bed7ff340c3b8d8a445de7f735b=184073, 7bb0efddddda4914a99e6a90793eed42=12523, f5a0c50dd3f54aa1b939275d5600a8af=17918, 4209a8e5da4d4177b5b4fcf51e4b1084=21171] 2024-11-16T12:50:16,780 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/753427f7c0fbf1885adf61308024b068/recovered.edits/343.seqid, newMaxSeqId=343, maxSeqId=85 2024-11-16T12:50:16,781 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 
2024-11-16T12:50:16,781 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 753427f7c0fbf1885adf61308024b068: Waiting for close lock at 1731761416743Running coprocessor pre-close hooks at 1731761416743Disabling compacts and flushes for region at 1731761416743Disabling writes for close at 1731761416743Writing region close event to WAL at 1731761416777 (+34 ms)Running coprocessor post-close hooks at 1731761416781 (+4 ms)Closed at 1731761416781 2024-11-16T12:50:16,781 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731761392066.753427f7c0fbf1885adf61308024b068. 2024-11-16T12:50:16,781 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e8eaec5ba20e3ee40e68e1509c73db72, disabling compactions & flushes 2024-11-16T12:50:16,781 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72. 2024-11-16T12:50:16,781 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72. 2024-11-16T12:50:16,781 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72. after waiting 0 ms 2024-11-16T12:50:16,781 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72. 2024-11-16T12:50:16,781 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/e8eaec5ba20e3ee40e68e1509c73db72/info/1df159e6bc5249a097db4245ac5b09a0.56327ae7c4c61355f9927cc610edcd05->hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/56327ae7c4c61355f9927cc610edcd05/info/1df159e6bc5249a097db4245ac5b09a0-bottom] to archive 2024-11-16T12:50:16,782 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-16T12:50:16,784 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/e8eaec5ba20e3ee40e68e1509c73db72/info/1df159e6bc5249a097db4245ac5b09a0.56327ae7c4c61355f9927cc610edcd05 to hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/archive/data/default/TestLogRolling-testLogRolling/e8eaec5ba20e3ee40e68e1509c73db72/info/1df159e6bc5249a097db4245ac5b09a0.56327ae7c4c61355f9927cc610edcd05 2024-11-16T12:50:16,784 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-16T12:50:16,787 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/data/default/TestLogRolling-testLogRolling/e8eaec5ba20e3ee40e68e1509c73db72/recovered.edits/90.seqid, newMaxSeqId=90, maxSeqId=85 2024-11-16T12:50:16,788 INFO [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72. 2024-11-16T12:50:16,788 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e8eaec5ba20e3ee40e68e1509c73db72: Waiting for close lock at 1731761416781Running coprocessor pre-close hooks at 1731761416781Disabling compacts and flushes for region at 1731761416781Disabling writes for close at 1731761416781Writing region close event to WAL at 1731761416784 (+3 ms)Running coprocessor post-close hooks at 1731761416788 (+4 ms)Closed at 1731761416788 2024-11-16T12:50:16,788 DEBUG [RS_CLOSE_REGION-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731761392066.e8eaec5ba20e3ee40e68e1509c73db72. 2024-11-16T12:50:16,932 INFO [regionserver/0450ab8807f5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T12:50:16,944 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer(976): stopping server 0450ab8807f5,33541,1731761368545; all regions closed. 
2024-11-16T12:50:16,945 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,945 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,945 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,945 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,945 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741834_1010 (size=8107) 2024-11-16T12:50:16,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741834_1010 (size=8107) 2024-11-16T12:50:16,950 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T12:50:16,950 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T12:50:16,951 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-16T12:50:16,951 DEBUG [RS:0;0450ab8807f5:33541 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/oldWALs 2024-11-16T12:50:16,951 INFO [RS:0;0450ab8807f5:33541 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0450ab8807f5%2C33541%2C1731761368545.meta:.meta(num 1731761369544) 2024-11-16T12:50:16,952 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,952 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,952 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,952 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,952 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:16,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741881_1057 (size=778) 2024-11-16T12:50:16,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741881_1057 (size=778) 2024-11-16T12:50:16,963 DEBUG [RS:0;0450ab8807f5:33541 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/oldWALs 2024-11-16T12:50:16,963 INFO [RS:0;0450ab8807f5:33541 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0450ab8807f5%2C33541%2C1731761368545:(num 1731761416583) 2024-11-16T12:50:16,963 DEBUG [RS:0;0450ab8807f5:33541 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:50:16,963 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T12:50:16,963 INFO [RS:0;0450ab8807f5:33541 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T12:50:16,964 INFO [RS:0;0450ab8807f5:33541 {}] hbase.ChoreService(370): Chore service for: regionserver/0450ab8807f5:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on 
shutdown 2024-11-16T12:50:16,964 INFO [RS:0;0450ab8807f5:33541 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T12:50:16,964 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T12:50:16,964 INFO [RS:0;0450ab8807f5:33541 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33541 2024-11-16T12:50:16,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T12:50:16,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0450ab8807f5,33541,1731761368545 2024-11-16T12:50:16,991 INFO [RS:0;0450ab8807f5:33541 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T12:50:16,991 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0450ab8807f5,33541,1731761368545] 2024-11-16T12:50:17,007 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0450ab8807f5,33541,1731761368545 already deleted, retry=false 2024-11-16T12:50:17,007 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0450ab8807f5,33541,1731761368545 expired; onlineServers=0 2024-11-16T12:50:17,008 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0450ab8807f5,46653,1731761368406' ***** 2024-11-16T12:50:17,008 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T12:50:17,008 INFO [M:0;0450ab8807f5:46653 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T12:50:17,008 INFO [M:0;0450ab8807f5:46653 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T12:50:17,008 DEBUG [M:0;0450ab8807f5:46653 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T12:50:17,008 DEBUG [M:0;0450ab8807f5:46653 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T12:50:17,008 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T12:50:17,008 DEBUG [master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761368873 {}] cleaner.HFileCleaner(306): Exit Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761368873,5,FailOnTimeoutGroup] 2024-11-16T12:50:17,008 INFO [M:0;0450ab8807f5:46653 {}] hbase.ChoreService(370): Chore service for: master/0450ab8807f5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T12:50:17,009 INFO [M:0;0450ab8807f5:46653 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T12:50:17,009 DEBUG [M:0;0450ab8807f5:46653 {}] master.HMaster(1795): Stopping service threads 2024-11-16T12:50:17,009 INFO [M:0;0450ab8807f5:46653 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T12:50:17,008 DEBUG [master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761368873 {}] cleaner.HFileCleaner(306): Exit Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761368873,5,FailOnTimeoutGroup] 2024-11-16T12:50:17,009 INFO [M:0;0450ab8807f5:46653 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T12:50:17,010 INFO [M:0;0450ab8807f5:46653 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T12:50:17,010 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T12:50:17,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T12:50:17,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:17,016 DEBUG [M:0;0450ab8807f5:46653 {}] zookeeper.ZKUtil(347): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T12:50:17,016 WARN [M:0;0450ab8807f5:46653 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T12:50:17,016 INFO [M:0;0450ab8807f5:46653 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/.lastflushedseqids 2024-11-16T12:50:17,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741882_1058 (size=228) 2024-11-16T12:50:17,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741882_1058 (size=228) 2024-11-16T12:50:17,044 INFO [M:0;0450ab8807f5:46653 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T12:50:17,044 INFO [M:0;0450ab8807f5:46653 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T12:50:17,044 DEBUG [M:0;0450ab8807f5:46653 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T12:50:17,044 INFO [M:0;0450ab8807f5:46653 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:50:17,044 DEBUG [M:0;0450ab8807f5:46653 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:50:17,044 DEBUG [M:0;0450ab8807f5:46653 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T12:50:17,044 DEBUG [M:0;0450ab8807f5:46653 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:50:17,044 INFO [M:0;0450ab8807f5:46653 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-16T12:50:17,067 DEBUG [M:0;0450ab8807f5:46653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/55d3c9d6c6234be29aa2e60cb6ec156c is 82, key is hbase:meta,,1/info:regioninfo/1731761369570/Put/seqid=0 2024-11-16T12:50:17,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741883_1059 (size=5672) 2024-11-16T12:50:17,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741883_1059 (size=5672) 2024-11-16T12:50:17,077 INFO [M:0;0450ab8807f5:46653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/55d3c9d6c6234be29aa2e60cb6ec156c 2024-11-16T12:50:17,099 INFO [RS:0;0450ab8807f5:33541 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T12:50:17,099 INFO [RS:0;0450ab8807f5:33541 {}] regionserver.HRegionServer(1031): Exiting; stopping=0450ab8807f5,33541,1731761368545; zookeeper connection closed. 
2024-11-16T12:50:17,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:50:17,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33541-0x10144fb00090001, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:50:17,100 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4faf7715 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4faf7715 2024-11-16T12:50:17,100 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T12:50:17,109 DEBUG [M:0;0450ab8807f5:46653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8b3a68b2bbba4acd9caab22621ddbbae is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731761370072/Put/seqid=0 2024-11-16T12:50:17,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741884_1060 (size=7090) 2024-11-16T12:50:17,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741884_1060 (size=7090) 2024-11-16T12:50:17,117 INFO [M:0;0450ab8807f5:46653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8b3a68b2bbba4acd9caab22621ddbbae 2024-11-16T12:50:17,123 INFO [M:0;0450ab8807f5:46653 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8b3a68b2bbba4acd9caab22621ddbbae 2024-11-16T12:50:17,144 DEBUG [M:0;0450ab8807f5:46653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5c48317c4f5a49b5a09199214797a571 is 69, key is 0450ab8807f5,33541,1731761368545/rs:state/1731761368907/Put/seqid=0 2024-11-16T12:50:17,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741885_1061 (size=5156) 2024-11-16T12:50:17,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741885_1061 (size=5156) 2024-11-16T12:50:17,150 INFO [M:0;0450ab8807f5:46653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5c48317c4f5a49b5a09199214797a571 2024-11-16T12:50:17,176 DEBUG [M:0;0450ab8807f5:46653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b3662a31177243afae915f69314da0a3 is 52, key is 
load_balancer_on/state:d/1731761369685/Put/seqid=0 2024-11-16T12:50:17,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741886_1062 (size=5056) 2024-11-16T12:50:17,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741886_1062 (size=5056) 2024-11-16T12:50:17,190 INFO [M:0;0450ab8807f5:46653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b3662a31177243afae915f69314da0a3 2024-11-16T12:50:17,196 DEBUG [M:0;0450ab8807f5:46653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/55d3c9d6c6234be29aa2e60cb6ec156c as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/55d3c9d6c6234be29aa2e60cb6ec156c 2024-11-16T12:50:17,203 INFO [M:0;0450ab8807f5:46653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/55d3c9d6c6234be29aa2e60cb6ec156c, entries=8, sequenceid=125, filesize=5.5 K 2024-11-16T12:50:17,204 DEBUG [M:0;0450ab8807f5:46653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8b3a68b2bbba4acd9caab22621ddbbae as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8b3a68b2bbba4acd9caab22621ddbbae 2024-11-16T12:50:17,210 INFO [M:0;0450ab8807f5:46653 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8b3a68b2bbba4acd9caab22621ddbbae 2024-11-16T12:50:17,210 INFO [M:0;0450ab8807f5:46653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8b3a68b2bbba4acd9caab22621ddbbae, entries=13, sequenceid=125, filesize=6.9 K 2024-11-16T12:50:17,211 DEBUG [M:0;0450ab8807f5:46653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5c48317c4f5a49b5a09199214797a571 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5c48317c4f5a49b5a09199214797a571 2024-11-16T12:50:17,223 INFO [M:0;0450ab8807f5:46653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5c48317c4f5a49b5a09199214797a571, entries=1, sequenceid=125, filesize=5.0 K 2024-11-16T12:50:17,225 DEBUG [M:0;0450ab8807f5:46653 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b3662a31177243afae915f69314da0a3 as hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b3662a31177243afae915f69314da0a3 2024-11-16T12:50:17,231 INFO [M:0;0450ab8807f5:46653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39017/user/jenkins/test-data/fd000785-00a8-00c8-faa2-d47a7afd59c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b3662a31177243afae915f69314da0a3, entries=1, sequenceid=125, filesize=4.9 K 2024-11-16T12:50:17,233 INFO [M:0;0450ab8807f5:46653 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 189ms, sequenceid=125, compaction requested=false 2024-11-16T12:50:17,235 INFO [M:0;0450ab8807f5:46653 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:50:17,235 DEBUG [M:0;0450ab8807f5:46653 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731761417044Disabling compacts and flushes for region at 1731761417044Disabling writes for close at 1731761417044Obtaining lock to block concurrent updates at 1731761417044Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731761417044Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1731761417045 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731761417046 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731761417046Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731761417066 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731761417066Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731761417084 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731761417109 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731761417109Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731761417123 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731761417143 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731761417143Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731761417156 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731761417175 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731761417175Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@13f76491: reopening flushed file at 1731761417195 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14d65ea1: reopening flushed file at 1731761417203 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1cdd6c5a: reopening flushed file at 1731761417210 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@792e849c: reopening flushed file at 1731761417224 (+14 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 189ms, sequenceid=125, compaction requested=false at 1731761417233 (+9 ms)Writing region close event to WAL at 1731761417235 (+2 ms)Closed at 1731761417235 2024-11-16T12:50:17,237 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:17,237 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:17,237 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:17,237 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:17,238 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:17,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38281 is added to blk_1073741830_1006 (size=61320) 2024-11-16T12:50:17,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42515 is added to blk_1073741830_1006 (size=61320) 2024-11-16T12:50:17,241 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T12:50:17,242 INFO [M:0;0450ab8807f5:46653 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-16T12:50:17,242 INFO [M:0;0450ab8807f5:46653 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46653 2024-11-16T12:50:17,242 INFO [M:0;0450ab8807f5:46653 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T12:50:17,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:50:17,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46653-0x10144fb00090000, quorum=127.0.0.1:60221, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:50:17,374 INFO [M:0;0450ab8807f5:46653 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T12:50:17,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@603e6d9b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:50:17,379 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@58ab8e7a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:50:17,379 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:50:17,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5da9c931{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:50:17,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48b1d1cb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/hadoop.log.dir/,STOPPED} 2024-11-16T12:50:17,382 WARN [BP-1746512817-172.17.0.2-1731761366811 heartbeating to localhost/127.0.0.1:39017 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:50:17,382 WARN [BP-1746512817-172.17.0.2-1731761366811 heartbeating to localhost/127.0.0.1:39017 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1746512817-172.17.0.2-1731761366811 (Datanode Uuid 984e258d-2436-4997-8aea-d0238996180a) service to localhost/127.0.0.1:39017 2024-11-16T12:50:17,383 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/cluster_f97aeae6-9f63-e107-b4f0-d9a7ad2afc9f/data/data3/current/BP-1746512817-172.17.0.2-1731761366811 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:50:17,383 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/cluster_f97aeae6-9f63-e107-b4f0-d9a7ad2afc9f/data/data4/current/BP-1746512817-172.17.0.2-1731761366811 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:50:17,383 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T12:50:17,383 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:50:17,384 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:50:17,395 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@72925ee1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:50:17,396 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f4aa33e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:50:17,396 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:50:17,396 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@286b8c80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:50:17,396 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a5db76d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/hadoop.log.dir/,STOPPED} 2024-11-16T12:50:17,398 WARN [BP-1746512817-172.17.0.2-1731761366811 heartbeating to localhost/127.0.0.1:39017 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:50:17,398 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T12:50:17,398 WARN [BP-1746512817-172.17.0.2-1731761366811 heartbeating to localhost/127.0.0.1:39017 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1746512817-172.17.0.2-1731761366811 (Datanode Uuid 1e9107ee-5017-401a-a398-1aa41c5fb377) service to localhost/127.0.0.1:39017 2024-11-16T12:50:17,398 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:50:17,399 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/cluster_f97aeae6-9f63-e107-b4f0-d9a7ad2afc9f/data/data1/current/BP-1746512817-172.17.0.2-1731761366811 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:50:17,399 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/cluster_f97aeae6-9f63-e107-b4f0-d9a7ad2afc9f/data/data2/current/BP-1746512817-172.17.0.2-1731761366811 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:50:17,399 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:50:17,409 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2eb912ab{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T12:50:17,410 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@37b300d0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:50:17,410 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:50:17,410 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@629cd82f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:50:17,410 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42d8f83b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/hadoop.log.dir/,STOPPED} 2024-11-16T12:50:17,420 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T12:50:17,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:17,465 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T12:50:17,476 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=232 (was 208) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39017 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39017 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:39017 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39017 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39017 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39017 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39017 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39017 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=514 (was 486) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=175 (was 229), ProcessCount=11 (was 11), AvailableMemoryMB=4051 (was 2842) - AvailableMemoryMB LEAK? - 2024-11-16T12:50:17,486 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=232, OpenFileDescriptor=514, MaxFileDescriptor=1048576, SystemLoadAverage=175, ProcessCount=11, AvailableMemoryMB=4051 2024-11-16T12:50:17,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T12:50:17,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/hadoop.log.dir so I do NOT create it in target/test-data/0da54629-5700-375f-9042-9db10827b77c 2024-11-16T12:50:17,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6be5e90c-05d0-eec2-fb64-f2e1aecaeaf6/hadoop.tmp.dir so I do NOT create it in target/test-data/0da54629-5700-375f-9042-9db10827b77c 2024-11-16T12:50:17,487 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/cluster_ba51f5e4-209e-d688-5b2e-a309fa8086a8, deleteOnExit=true 2024-11-16T12:50:17,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T12:50:17,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/test.cache.data in system properties and HBase conf 2024-11-16T12:50:17,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T12:50:17,488 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/hadoop.log.dir in system properties and HBase conf 2024-11-16T12:50:17,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T12:50:17,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T12:50:17,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T12:50:17,488 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-16T12:50:17,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T12:50:17,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T12:50:17,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T12:50:17,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T12:50:17,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T12:50:17,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T12:50:17,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T12:50:17,489 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T12:50:17,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T12:50:17,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/nfs.dump.dir in system properties and HBase conf 2024-11-16T12:50:17,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/java.io.tmpdir in system properties and HBase conf 2024-11-16T12:50:17,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T12:50:17,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T12:50:17,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T12:50:17,508 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T12:50:17,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:17,778 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:50:17,781 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:50:17,782 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:50:17,782 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:50:17,783 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T12:50:17,785 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:50:17,785 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17951be7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:50:17,785 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b7a9b88{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:50:17,883 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5fea8446{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/java.io.tmpdir/jetty-localhost-39015-hadoop-hdfs-3_4_1-tests_jar-_-any-9338794055720112053/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T12:50:17,883 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@413a6699{HTTP/1.1, (http/1.1)}{localhost:39015} 2024-11-16T12:50:17,883 INFO [Time-limited test {}] server.Server(415): Started @296770ms 2024-11-16T12:50:17,895 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T12:50:18,075 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:50:18,077 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:50:18,080 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:50:18,080 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:50:18,080 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T12:50:18,080 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61ab51b1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:50:18,081 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ae1ce13{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:50:18,174 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c9b811e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/java.io.tmpdir/jetty-localhost-36961-hadoop-hdfs-3_4_1-tests_jar-_-any-10434370148217343489/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:50:18,175 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@548e17de{HTTP/1.1, (http/1.1)}{localhost:36961} 2024-11-16T12:50:18,175 INFO [Time-limited test {}] server.Server(415): Started @297062ms 2024-11-16T12:50:18,176 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:50:18,213 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T12:50:18,215 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T12:50:18,216 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T12:50:18,216 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T12:50:18,216 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T12:50:18,216 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59dd2dec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/hadoop.log.dir/,AVAILABLE} 2024-11-16T12:50:18,216 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a03636e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T12:50:18,315 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b2e282d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/java.io.tmpdir/jetty-localhost-36157-hadoop-hdfs-3_4_1-tests_jar-_-any-701875360324290934/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:50:18,316 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@31bc17f3{HTTP/1.1, (http/1.1)}{localhost:36157} 2024-11-16T12:50:18,316 INFO [Time-limited test {}] server.Server(415): Started @297203ms 2024-11-16T12:50:18,317 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T12:50:18,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:18,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:50:18,982 WARN [Thread-2495 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/cluster_ba51f5e4-209e-d688-5b2e-a309fa8086a8/data/data2/current/BP-2127107745-172.17.0.2-1731761417512/current, will proceed with Du for space computation calculation, 2024-11-16T12:50:18,982 WARN [Thread-2494 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/cluster_ba51f5e4-209e-d688-5b2e-a309fa8086a8/data/data1/current/BP-2127107745-172.17.0.2-1731761417512/current, will proceed with Du for space computation calculation, 2024-11-16T12:50:19,001 WARN [Thread-2458 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T12:50:19,002 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc17aa4ba43f17c59 with lease ID 0x281ec937e708a823: Processing first storage report for DS-cdf66601-8ccc-46b5-b17e-c2c98c52ee28 from datanode DatanodeRegistration(127.0.0.1:40507, datanodeUuid=290d6b3c-8743-4b3e-ba66-ad240659567d, infoPort=33411, infoSecurePort=0, ipcPort=41669, storageInfo=lv=-57;cid=testClusterID;nsid=1904151751;c=1731761417512) 2024-11-16T12:50:19,002 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc17aa4ba43f17c59 with lease ID 0x281ec937e708a823: from storage DS-cdf66601-8ccc-46b5-b17e-c2c98c52ee28 node DatanodeRegistration(127.0.0.1:40507, datanodeUuid=290d6b3c-8743-4b3e-ba66-ad240659567d, infoPort=33411, infoSecurePort=0, ipcPort=41669, storageInfo=lv=-57;cid=testClusterID;nsid=1904151751;c=1731761417512), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:50:19,003 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc17aa4ba43f17c59 with lease ID 0x281ec937e708a823: Processing first storage report for DS-c03aba4d-9b56-46f9-b32f-967f7ccbdfeb from datanode DatanodeRegistration(127.0.0.1:40507, datanodeUuid=290d6b3c-8743-4b3e-ba66-ad240659567d, infoPort=33411, infoSecurePort=0, ipcPort=41669, storageInfo=lv=-57;cid=testClusterID;nsid=1904151751;c=1731761417512) 2024-11-16T12:50:19,003 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc17aa4ba43f17c59 with lease ID 0x281ec937e708a823: from storage DS-c03aba4d-9b56-46f9-b32f-967f7ccbdfeb node DatanodeRegistration(127.0.0.1:40507, datanodeUuid=290d6b3c-8743-4b3e-ba66-ad240659567d, infoPort=33411, infoSecurePort=0, ipcPort=41669, storageInfo=lv=-57;cid=testClusterID;nsid=1904151751;c=1731761417512), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:50:19,126 WARN [Thread-2506 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/cluster_ba51f5e4-209e-d688-5b2e-a309fa8086a8/data/data4/current/BP-2127107745-172.17.0.2-1731761417512/current, will proceed with Du for space computation calculation, 2024-11-16T12:50:19,126 WARN [Thread-2505 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/cluster_ba51f5e4-209e-d688-5b2e-a309fa8086a8/data/data3/current/BP-2127107745-172.17.0.2-1731761417512/current, will proceed with Du for space computation calculation, 2024-11-16T12:50:19,150 WARN [Thread-2481 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T12:50:19,152 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfc5af33681686629 with lease ID 0x281ec937e708a824: Processing first storage report for DS-8c8dd182-a77b-47cb-a645-c57f1ce7bfce from datanode DatanodeRegistration(127.0.0.1:44511, datanodeUuid=b5667f6d-ec32-4913-aa7f-9afeb4a617c3, infoPort=37489, infoSecurePort=0, ipcPort=44105, storageInfo=lv=-57;cid=testClusterID;nsid=1904151751;c=1731761417512) 2024-11-16T12:50:19,152 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfc5af33681686629 with lease ID 0x281ec937e708a824: from storage DS-8c8dd182-a77b-47cb-a645-c57f1ce7bfce node DatanodeRegistration(127.0.0.1:44511, datanodeUuid=b5667f6d-ec32-4913-aa7f-9afeb4a617c3, infoPort=37489, infoSecurePort=0, ipcPort=44105, storageInfo=lv=-57;cid=testClusterID;nsid=1904151751;c=1731761417512), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:50:19,152 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfc5af33681686629 with lease ID 0x281ec937e708a824: Processing first storage report for DS-6b63174d-8efc-43b9-8c13-5182c61249d1 from datanode DatanodeRegistration(127.0.0.1:44511, datanodeUuid=b5667f6d-ec32-4913-aa7f-9afeb4a617c3, infoPort=37489, infoSecurePort=0, ipcPort=44105, storageInfo=lv=-57;cid=testClusterID;nsid=1904151751;c=1731761417512) 2024-11-16T12:50:19,152 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfc5af33681686629 with lease ID 0x281ec937e708a824: from storage DS-6b63174d-8efc-43b9-8c13-5182c61249d1 node DatanodeRegistration(127.0.0.1:44511, datanodeUuid=b5667f6d-ec32-4913-aa7f-9afeb4a617c3, infoPort=37489, infoSecurePort=0, ipcPort=44105, storageInfo=lv=-57;cid=testClusterID;nsid=1904151751;c=1731761417512), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T12:50:19,243 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c 2024-11-16T12:50:19,251 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/cluster_ba51f5e4-209e-d688-5b2e-a309fa8086a8/zookeeper_0, clientPort=62323, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/cluster_ba51f5e4-209e-d688-5b2e-a309fa8086a8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/cluster_ba51f5e4-209e-d688-5b2e-a309fa8086a8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T12:50:19,252 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62323 2024-11-16T12:50:19,252 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:50:19,253 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:50:19,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741825_1001 (size=7) 2024-11-16T12:50:19,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741825_1001 (size=7) 2024-11-16T12:50:19,268 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801 with version=8 2024-11-16T12:50:19,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36417/user/jenkins/test-data/900d1c95-23ac-07e8-9a39-e671f4b04922/hbase-staging 2024-11-16T12:50:19,270 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0450ab8807f5:0 server-side Connection retries=45 2024-11-16T12:50:19,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:50:19,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T12:50:19,270 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T12:50:19,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:50:19,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T12:50:19,270 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T12:50:19,270 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T12:50:19,271 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40947 2024-11-16T12:50:19,272 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40947 connecting to ZooKeeper ensemble=127.0.0.1:62323 2024-11-16T12:50:19,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:409470x0, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-16T12:50:19,329 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40947-0x10144fbc6b50000 connected 2024-11-16T12:50:19,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:50:19,456 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:50:19,457 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:50:19,459 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:50:19,459 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801, hbase.cluster.distributed=false 2024-11-16T12:50:19,461 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T12:50:19,463 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40947 2024-11-16T12:50:19,463 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40947 2024-11-16T12:50:19,463 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40947 2024-11-16T12:50:19,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40947 2024-11-16T12:50:19,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40947 2024-11-16T12:50:19,478 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0450ab8807f5:0 server-side Connection retries=45 2024-11-16T12:50:19,478 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:50:19,478 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T12:50:19,478 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T12:50:19,478 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T12:50:19,478 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T12:50:19,478 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T12:50:19,478 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 
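The InvocationTargetException that the Close-WAL-Writer-0 thread keeps logging above always bottoms out in the same cause: by the time lease recovery retries, the DistributedFileSystem behind the WAL has already been closed by the minicluster teardown, so every call trips the DFSClient.checkOpen() guard. A minimal, self-contained sketch of that failure mode follows; the NameNode address, path and class name are placeholders, not taken from this log.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ClosedFsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:9000"); // placeholder NameNode, not the one in this log
    // newInstance() bypasses the shared FileSystem cache, so close() only affects this handle.
    FileSystem fs = FileSystem.newInstance(conf);
    fs.close(); // what the minicluster shutdown effectively does to the WAL writer's client
    try {
      ((DistributedFileSystem) fs).isFileClosed(new Path("/placeholder/wal-file"));
    } catch (IOException e) {
      // Prints "Filesystem closed", matching the Caused-by in the stack traces above.
      System.out.println(e.getMessage());
    }
  }
}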
2024-11-16T12:50:19,479 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43383 2024-11-16T12:50:19,480 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43383 connecting to ZooKeeper ensemble=127.0.0.1:62323 2024-11-16T12:50:19,480 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:50:19,481 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:50:19,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:433830x0, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T12:50:19,510 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:433830x0, quorum=127.0.0.1:62323, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:50:19,510 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43383-0x10144fbc6b50001 connected 2024-11-16T12:50:19,510 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T12:50:19,511 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T12:50:19,511 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T12:50:19,512 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T12:50:19,513 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43383 2024-11-16T12:50:19,513 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43383 2024-11-16T12:50:19,513 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43383 2024-11-16T12:50:19,513 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43383 2024-11-16T12:50:19,513 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43383 2024-11-16T12:50:19,524 DEBUG [M:0;0450ab8807f5:40947 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0450ab8807f5:40947 2024-11-16T12:50:19,525 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0450ab8807f5,40947,1731761419269 2024-11-16T12:50:19,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:50:19,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:50:19,548 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0450ab8807f5,40947,1731761419269 2024-11-16T12:50:19,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:19,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T12:50:19,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:19,556 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T12:50:19,556 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0450ab8807f5,40947,1731761419269 from backup master directory 2024-11-16T12:50:19,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:50:19,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0450ab8807f5,40947,1731761419269 2024-11-16T12:50:19,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T12:50:19,564 WARN [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
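The repeated ZKUtil messages "Set watcher on znode that does not yet exist" describe the standard ZooKeeper idiom of calling exists() with a watcher: the call returns null for an absent znode but still registers the watch, so the NodeCreated events seen later (for example on /hbase/master and /hbase/running) are delivered to the same watcher. A minimal standalone sketch of that idiom, not HBase's ZKUtil itself:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("event=" + event.getType() + " path=" + event.getPath());
        // Ensemble address mirrors the quorum used throughout this log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62323", 30000, watcher);
        // Returns null while /hbase/master is absent, but the watch is registered,
        // so the watcher fires with NodeCreated once the active master creates it.
        System.out.println("stat=" + zk.exists("/hbase/master", true));
      }
    }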
2024-11-16T12:50:19,564 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0450ab8807f5,40947,1731761419269 2024-11-16T12:50:19,567 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/hbase.id] with ID: 517f81ec-742b-4953-a53c-224d20444b39 2024-11-16T12:50:19,567 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/.tmp/hbase.id 2024-11-16T12:50:19,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741826_1002 (size=42) 2024-11-16T12:50:19,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741826_1002 (size=42) 2024-11-16T12:50:19,573 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/.tmp/hbase.id]:[hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/hbase.id] 2024-11-16T12:50:19,583 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:50:19,583 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T12:50:19,584 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
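The FSUtils messages above show hbase.id being written to a .tmp location and then moved into place. A simplified sketch of that write-then-rename pattern with the plain Hadoop FileSystem API follows; the paths and error handling are assumptions for illustration, not the FSUtils code itself:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdPublishSketch {
      public static void publish(FileSystem fs, Path rootDir, String clusterId) throws IOException {
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path target = new Path(rootDir, "hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // An HDFS rename within one filesystem is atomic, so readers observe either
        // no hbase.id at all or a completely written one, never a partial file.
        if (!fs.rename(tmp, target)) {
          throw new IOException("rename failed: " + tmp + " -> " + target);
        }
      }
    }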
2024-11-16T12:50:19,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:19,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:19,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741827_1003 (size=196) 2024-11-16T12:50:19,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741827_1003 (size=196) 2024-11-16T12:50:19,595 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T12:50:19,596 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T12:50:19,596 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:50:19,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741828_1004 (size=1189) 2024-11-16T12:50:19,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741828_1004 (size=1189) 2024-11-16T12:50:19,603 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store 2024-11-16T12:50:19,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741829_1005 (size=34) 2024-11-16T12:50:19,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741829_1005 (size=34) 2024-11-16T12:50:19,614 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:50:19,614 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T12:50:19,614 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:50:19,614 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:50:19,614 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T12:50:19,614 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:50:19,614 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
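The 'info' family printed in the master:store descriptor above (3 versions, in-memory, 8 KB blocks, ROWCOL bloom filter, ROW_INDEX_V1 encoding) maps directly onto the public descriptor builders. The following only illustrates expressing those attributes with the client API; it is not the MasterRegion code that actually creates the store:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static TableDescriptor masterStoreLike() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .build();
      }
    }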
2024-11-16T12:50:19,614 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731761419614Disabling compacts and flushes for region at 1731761419614Disabling writes for close at 1731761419614Writing region close event to WAL at 1731761419614Closed at 1731761419614 2024-11-16T12:50:19,615 WARN [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/.initializing 2024-11-16T12:50:19,615 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/WALs/0450ab8807f5,40947,1731761419269 2024-11-16T12:50:19,618 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C40947%2C1731761419269, suffix=, logDir=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/WALs/0450ab8807f5,40947,1731761419269, archiveDir=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/oldWALs, maxLogs=10 2024-11-16T12:50:19,618 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C40947%2C1731761419269.1731761419618 2024-11-16T12:50:19,623 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/WALs/0450ab8807f5,40947,1731761419269/0450ab8807f5%2C40947%2C1731761419269.1731761419618 2024-11-16T12:50:19,627 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37489:37489),(127.0.0.1/127.0.0.1:33411:33411)] 2024-11-16T12:50:19,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:19,630 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:50:19,630 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:50:19,630 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:50:19,630 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:50:19,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:50:19,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T12:50:19,633 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:50:19,634 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:50:19,634 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:50:19,635 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T12:50:19,635 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:50:19,636 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:50:19,636 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:50:19,637 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T12:50:19,637 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:50:19,638 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:50:19,638 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 
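The WARN and stack trace a few lines above come from a leftover Close-WAL-Writer thread of an earlier mini-cluster: RecoverLeaseFSUtils calls recoverLease and then polls isFileClosed, and the poll fails with "Filesystem closed" because that cluster's DFSClient has already been shut down. A much-simplified sketch of the recover-then-poll pattern is shown below; the retry count and sleep are assumptions, not the real backoff schedule:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      public static boolean recover(DistributedFileSystem dfs, Path wal) throws Exception {
        if (dfs.recoverLease(wal)) {
          return true;                    // NameNode released the lease immediately
        }
        for (int i = 0; i < 10; i++) {    // illustrative retry budget
          Thread.sleep(1000L);
          if (dfs.isFileClosed(wal)) {    // the call that throws once the client is closed
            return true;
          }
        }
        return false;
      }
    }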
2024-11-16T12:50:19,639 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T12:50:19,639 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:50:19,640 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T12:50:19,640 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:50:19,641 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:50:19,641 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:50:19,642 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:50:19,642 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:50:19,643 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
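The FlushLargeStoresPolicy message above spells out the fallback: with no hbase.hregion.percolumnfamilyflush.size.lower.bound configured, the per-family lower bound becomes the region flush size divided by the number of families. For master:store that is the flushSize of 134217728 injected earlier, split across its four families (info, proc, rs, state):

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long flushSize = 134_217_728L;   // flushSize injected for master:store
        int families = 4;                // info, proc, rs, state
        // 33554432 bytes = 32 MB, the flushSizeLowerBound reported just below.
        System.out.println(flushSize / families);
      }
    }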
2024-11-16T12:50:19,644 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T12:50:19,646 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:50:19,647 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778108, jitterRate=-0.010584890842437744}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T12:50:19,648 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731761419630Initializing all the Stores at 1731761419631 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761419631Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761419632 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761419632Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761419632Cleaning up temporary data from old regions at 1731761419642 (+10 ms)Region opened successfully at 1731761419648 (+6 ms) 2024-11-16T12:50:19,648 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T12:50:19,652 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74629d9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0450ab8807f5/172.17.0.2:0 2024-11-16T12:50:19,653 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-16T12:50:19,653 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T12:50:19,653 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T12:50:19,653 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T12:50:19,654 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T12:50:19,654 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T12:50:19,654 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T12:50:19,658 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T12:50:19,659 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T12:50:19,672 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T12:50:19,673 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T12:50:19,673 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T12:50:19,681 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T12:50:19,681 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T12:50:19,682 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T12:50:19,689 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T12:50:19,690 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T12:50:19,697 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T12:50:19,700 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T12:50:19,705 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T12:50:19,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T12:50:19,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T12:50:19,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:19,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:19,714 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0450ab8807f5,40947,1731761419269, sessionid=0x10144fbc6b50000, setting cluster-up flag (Was=false) 2024-11-16T12:50:19,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:19,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:19,756 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T12:50:19,757 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0450ab8807f5,40947,1731761419269 2024-11-16T12:50:19,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:19,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:19,797 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T12:50:19,798 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0450ab8807f5,40947,1731761419269 2024-11-16T12:50:19,800 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at 
hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T12:50:19,801 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T12:50:19,801 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T12:50:19,801 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T12:50:19,802 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0450ab8807f5,40947,1731761419269 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T12:50:19,803 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:50:19,803 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:50:19,803 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:50:19,803 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0450ab8807f5:0, corePoolSize=5, maxPoolSize=5 2024-11-16T12:50:19,803 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0450ab8807f5:0, corePoolSize=10, maxPoolSize=10 2024-11-16T12:50:19,803 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:50:19,803 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0450ab8807f5:0, corePoolSize=2, maxPoolSize=2 2024-11-16T12:50:19,803 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:50:19,804 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; 
timeout=30000, timestamp=1731761449804 2024-11-16T12:50:19,804 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T12:50:19,804 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T12:50:19,804 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T12:50:19,805 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T12:50:19,805 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T12:50:19,805 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T12:50:19,805 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:50:19,805 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T12:50:19,805 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T12:50:19,805 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T12:50:19,805 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T12:50:19,805 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T12:50:19,805 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T12:50:19,805 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T12:50:19,806 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:50:19,806 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T12:50:19,806 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761419805,5,FailOnTimeoutGroup] 2024-11-16T12:50:19,806 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761419806,5,FailOnTimeoutGroup] 2024-11-16T12:50:19,807 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T12:50:19,807 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T12:50:19,807 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T12:50:19,807 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
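Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" line above corresponds to a periodic task registered with the master's ChoreService (log cleaner, HFile cleaner, replication barrier cleaner, snapshot cleaner). A minimal chore of the same shape, purely illustrative, with a made-up name and the 600000 ms period the cleaners use:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(new ScheduledChore("ExampleCleaner", stopper, 600000) {
          @Override protected void chore() {
            System.out.println("periodic cleanup work would run here");
          }
        });
      }
    }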
2024-11-16T12:50:19,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741831_1007 (size=1321) 2024-11-16T12:50:19,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741831_1007 (size=1321) 2024-11-16T12:50:19,815 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.HRegionServer(746): ClusterId : 517f81ec-742b-4953-a53c-224d20444b39 2024-11-16T12:50:19,815 DEBUG [RS:0;0450ab8807f5:43383 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T12:50:19,815 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T12:50:19,815 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801 2024-11-16T12:50:19,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741832_1008 (size=32) 2024-11-16T12:50:19,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741832_1008 (size=32) 2024-11-16T12:50:19,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:50:19,822 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column 
family info of region 1588230740 2024-11-16T12:50:19,823 DEBUG [RS:0;0450ab8807f5:43383 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T12:50:19,823 DEBUG [RS:0;0450ab8807f5:43383 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T12:50:19,824 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T12:50:19,824 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:50:19,824 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:50:19,824 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T12:50:19,825 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T12:50:19,825 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:50:19,826 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:50:19,826 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T12:50:19,827 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 
MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T12:50:19,827 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:50:19,827 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:50:19,827 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T12:50:19,828 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T12:50:19,828 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:50:19,828 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:50:19,828 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T12:50:19,829 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/data/hbase/meta/1588230740 2024-11-16T12:50:19,829 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/data/hbase/meta/1588230740 2024-11-16T12:50:19,830 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T12:50:19,830 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T12:50:19,830 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No 
hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T12:50:19,831 DEBUG [RS:0;0450ab8807f5:43383 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T12:50:19,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T12:50:19,831 DEBUG [RS:0;0450ab8807f5:43383 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cb79559, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0450ab8807f5/172.17.0.2:0 2024-11-16T12:50:19,833 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T12:50:19,833 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=796926, jitterRate=0.01334427297115326}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T12:50:19,834 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731761419821Initializing all the Stores at 1731761419822 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761419822Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761419822Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761419822Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761419822Cleaning up temporary data from old regions at 1731761419830 (+8 ms)Region opened successfully at 1731761419834 (+4 ms) 2024-11-16T12:50:19,834 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T12:50:19,834 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T12:50:19,834 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on 
hbase:meta,,1.1588230740 2024-11-16T12:50:19,834 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T12:50:19,834 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T12:50:19,834 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T12:50:19,835 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731761419834Disabling compacts and flushes for region at 1731761419834Disabling writes for close at 1731761419834Writing region close event to WAL at 1731761419834Closed at 1731761419834 2024-11-16T12:50:19,835 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:50:19,835 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T12:50:19,836 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T12:50:19,836 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T12:50:19,837 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T12:50:19,843 DEBUG [RS:0;0450ab8807f5:43383 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0450ab8807f5:43383 2024-11-16T12:50:19,843 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T12:50:19,843 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T12:50:19,843 DEBUG [RS:0;0450ab8807f5:43383 {}] regionserver.HRegionServer(832): About to register with Master. 
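[Editor's note] The FlushLargeStoresPolicy lines above fall back to dividing the region's memstore flush heap size by the number of column families when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table. A minimal sketch of that arithmetic follows; the class and method names are illustrative only, not HBase's FlushLargeStoresPolicy code, and the 64 MB flush size is an assumption chosen so the result matches the logged 16.0 M / flushSizeLowerBound=16777216.

public class FlushLowerBoundSketch {
    // Fallback described by the FlushLargeStoresPolicy message above: no per-family
    // lower bound in the table descriptor, so divide the region's memstore flush
    // heap size by the number of column families.
    static long perFamilyLowerBound(Long descriptorSetting, long memStoreFlushHeapSize, int numFamilies) {
        if (descriptorSetting != null) {
            return descriptorSetting; // explicit hbase.hregion.percolumnfamilyflush.size.lower.bound
        }
        return memStoreFlushHeapSize / numFamilies;
    }

    public static void main(String[] args) {
        // hbase:meta has four families (info, ns, rep_barrier, table); assuming a
        // 64 MB flush size, 64 MB / 4 = 16 MB = 16777216 bytes, as logged above.
        System.out.println(perFamilyLowerBound(null, 64L * 1024 * 1024, 4));
    }
}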
2024-11-16T12:50:19,844 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.HRegionServer(2659): reportForDuty to master=0450ab8807f5,40947,1731761419269 with port=43383, startcode=1731761419478 2024-11-16T12:50:19,844 DEBUG [RS:0;0450ab8807f5:43383 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T12:50:19,846 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58935, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T12:50:19,847 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40947 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0450ab8807f5,43383,1731761419478 2024-11-16T12:50:19,847 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40947 {}] master.ServerManager(517): Registering regionserver=0450ab8807f5,43383,1731761419478 2024-11-16T12:50:19,848 DEBUG [RS:0;0450ab8807f5:43383 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801 2024-11-16T12:50:19,848 DEBUG [RS:0;0450ab8807f5:43383 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44067 2024-11-16T12:50:19,848 DEBUG [RS:0;0450ab8807f5:43383 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T12:50:19,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T12:50:19,856 DEBUG [RS:0;0450ab8807f5:43383 {}] zookeeper.ZKUtil(111): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0450ab8807f5,43383,1731761419478 2024-11-16T12:50:19,856 WARN [RS:0;0450ab8807f5:43383 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T12:50:19,856 INFO [RS:0;0450ab8807f5:43383 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:50:19,856 DEBUG [RS:0;0450ab8807f5:43383 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/WALs/0450ab8807f5,43383,1731761419478 2024-11-16T12:50:19,856 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0450ab8807f5,43383,1731761419478] 2024-11-16T12:50:19,859 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T12:50:19,861 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T12:50:19,862 INFO [RS:0;0450ab8807f5:43383 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T12:50:19,862 INFO [RS:0;0450ab8807f5:43383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
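[Editor's note] The znode activity above (ephemeral /hbase/rs/0450ab8807f5,43383,... creation, NodeChildrenChanged on /hbase/rs, watcher set by the region server) is the standard ZooKeeper membership pattern. The sketch below shows that generic pattern with the plain ZooKeeper client API; it is not HBase's ZKUtil/RegionServerTracker code, and it assumes the /hbase/rs parent znode already exists on an unsecured quorum.

import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class RsZNodeSketch {
    public static void main(String[] args) throws Exception {
        // Quorum address and paths match the log above; everything else is illustrative.
        Watcher watcher = (WatchedEvent e) -> System.out.println("event: " + e);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62323", 30_000, watcher);

        // Region server side: an ephemeral znode that disappears if the session dies.
        zk.create("/hbase/rs/0450ab8807f5,43383,1731761419478", new byte[0],
                  ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        // Master side: watch the children of /hbase/rs to notice servers joining or
        // leaving (the NodeChildrenChanged event seen in the log).
        List<String> servers = zk.getChildren("/hbase/rs", watcher);
        System.out.println("live servers: " + servers);
        zk.close();
    }
}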
2024-11-16T12:50:19,862 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T12:50:19,862 INFO [RS:0;0450ab8807f5:43383 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T12:50:19,862 INFO [RS:0;0450ab8807f5:43383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T12:50:19,863 DEBUG [RS:0;0450ab8807f5:43383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:50:19,863 DEBUG [RS:0;0450ab8807f5:43383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:50:19,863 DEBUG [RS:0;0450ab8807f5:43383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:50:19,863 DEBUG [RS:0;0450ab8807f5:43383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:50:19,863 DEBUG [RS:0;0450ab8807f5:43383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:50:19,863 DEBUG [RS:0;0450ab8807f5:43383 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0450ab8807f5:0, corePoolSize=2, maxPoolSize=2 2024-11-16T12:50:19,863 DEBUG [RS:0;0450ab8807f5:43383 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:50:19,863 DEBUG [RS:0;0450ab8807f5:43383 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:50:19,863 DEBUG [RS:0;0450ab8807f5:43383 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:50:19,863 DEBUG [RS:0;0450ab8807f5:43383 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:50:19,863 DEBUG [RS:0;0450ab8807f5:43383 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:50:19,863 DEBUG [RS:0;0450ab8807f5:43383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0450ab8807f5:0, corePoolSize=1, maxPoolSize=1 2024-11-16T12:50:19,863 DEBUG [RS:0;0450ab8807f5:43383 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0450ab8807f5:0, corePoolSize=3, maxPoolSize=3 2024-11-16T12:50:19,863 DEBUG [RS:0;0450ab8807f5:43383 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0450ab8807f5:0, corePoolSize=3, maxPoolSize=3 2024-11-16T12:50:19,865 INFO [RS:0;0450ab8807f5:43383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
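[Editor's note] The executor.ExecutorService lines above list one bounded, named thread pool per event type (RS_OPEN_REGION, RS_SNAPSHOT_OPERATIONS, ...), each with the logged corePoolSize/maxPoolSize. A minimal java.util.concurrent sketch of what those numbers mean follows; it is a stand-in, not HBase's executor.ExecutorService class.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class NamedPoolSketch {
    // Bounded pool with the corePoolSize/maxPoolSize semantics logged above.
    static ThreadPoolExecutor newPool(String name, int core, int max) {
        ThreadFactory tf = r -> {
            Thread t = new Thread(r, name + "-" + System.nanoTime());
            t.setDaemon(true);
            return t;
        };
        return new ThreadPoolExecutor(core, max, 60L, TimeUnit.SECONDS,
                                      new LinkedBlockingQueue<>(), tf);
    }

    public static void main(String[] args) throws Exception {
        ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1, 1);
        ThreadPoolExecutor snapshotOps = newPool("RS_SNAPSHOT_OPERATIONS", 3, 3);
        openRegion.submit(() -> System.out.println("open-region task"));
        openRegion.shutdown();
        snapshotOps.shutdown();
        openRegion.awaitTermination(5, TimeUnit.SECONDS);
    }
}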
2024-11-16T12:50:19,865 INFO [RS:0;0450ab8807f5:43383 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T12:50:19,865 INFO [RS:0;0450ab8807f5:43383 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:50:19,865 INFO [RS:0;0450ab8807f5:43383 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T12:50:19,865 INFO [RS:0;0450ab8807f5:43383 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T12:50:19,865 INFO [RS:0;0450ab8807f5:43383 {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,43383,1731761419478-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T12:50:19,878 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T12:50:19,879 INFO [RS:0;0450ab8807f5:43383 {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,43383,1731761419478-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:50:19,879 INFO [RS:0;0450ab8807f5:43383 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:50:19,879 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.Replication(171): 0450ab8807f5,43383,1731761419478 started 2024-11-16T12:50:19,891 INFO [RS:0;0450ab8807f5:43383 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:50:19,891 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.HRegionServer(1482): Serving as 0450ab8807f5,43383,1731761419478, RpcServer on 0450ab8807f5/172.17.0.2:43383, sessionid=0x10144fbc6b50001 2024-11-16T12:50:19,891 DEBUG [RS:0;0450ab8807f5:43383 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T12:50:19,891 DEBUG [RS:0;0450ab8807f5:43383 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0450ab8807f5,43383,1731761419478 2024-11-16T12:50:19,891 DEBUG [RS:0;0450ab8807f5:43383 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0450ab8807f5,43383,1731761419478' 2024-11-16T12:50:19,892 DEBUG [RS:0;0450ab8807f5:43383 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T12:50:19,892 DEBUG [RS:0;0450ab8807f5:43383 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T12:50:19,893 DEBUG [RS:0;0450ab8807f5:43383 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T12:50:19,893 DEBUG [RS:0;0450ab8807f5:43383 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T12:50:19,893 DEBUG [RS:0;0450ab8807f5:43383 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0450ab8807f5,43383,1731761419478 2024-11-16T12:50:19,893 DEBUG [RS:0;0450ab8807f5:43383 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0450ab8807f5,43383,1731761419478' 2024-11-16T12:50:19,893 DEBUG [RS:0;0450ab8807f5:43383 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T12:50:19,893 DEBUG 
[RS:0;0450ab8807f5:43383 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T12:50:19,893 DEBUG [RS:0;0450ab8807f5:43383 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T12:50:19,893 INFO [RS:0;0450ab8807f5:43383 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T12:50:19,893 INFO [RS:0;0450ab8807f5:43383 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T12:50:19,988 WARN [0450ab8807f5:40947 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-16T12:50:19,996 INFO [RS:0;0450ab8807f5:43383 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C43383%2C1731761419478, suffix=, logDir=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/WALs/0450ab8807f5,43383,1731761419478, archiveDir=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/oldWALs, maxLogs=32 2024-11-16T12:50:19,996 INFO [RS:0;0450ab8807f5:43383 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C43383%2C1731761419478.1731761419996 2024-11-16T12:50:20,002 INFO [RS:0;0450ab8807f5:43383 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/WALs/0450ab8807f5,43383,1731761419478/0450ab8807f5%2C43383%2C1731761419478.1731761419996 2024-11-16T12:50:20,007 DEBUG [RS:0;0450ab8807f5:43383 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37489:37489),(127.0.0.1/127.0.0.1:33411:33411)] 2024-11-16T12:50:20,238 DEBUG [0450ab8807f5:40947 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T12:50:20,238 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0450ab8807f5,43383,1731761419478 2024-11-16T12:50:20,239 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0450ab8807f5,43383,1731761419478, state=OPENING 2024-11-16T12:50:20,269 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T12:50:20,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:20,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:20,318 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T12:50:20,318 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:50:20,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=0450ab8807f5,43383,1731761419478}] 2024-11-16T12:50:20,319 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:50:20,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:50:20,471 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T12:50:20,473 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47517, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T12:50:20,476 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T12:50:20,476 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:50:20,477 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0450ab8807f5%2C43383%2C1731761419478.meta, suffix=.meta, logDir=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/WALs/0450ab8807f5,43383,1731761419478, archiveDir=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/oldWALs, maxLogs=32 2024-11-16T12:50:20,478 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0450ab8807f5%2C43383%2C1731761419478.meta.1731761420477.meta 2024-11-16T12:50:20,482 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/WALs/0450ab8807f5,43383,1731761419478/0450ab8807f5%2C43383%2C1731761419478.meta.1731761420477.meta 2024-11-16T12:50:20,491 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33411:33411),(127.0.0.1/127.0.0.1:37489:37489)] 2024-11-16T12:50:20,491 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T12:50:20,492 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T12:50:20,492 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T12:50:20,492 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
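[Editor's note] The WARN above comes from RecoverLeaseFSUtils calling isFileClosed reflectively (visible from GeneratedMethodAccessor/Method.invoke in the stack), so the real "Filesystem closed" IOException surfaces wrapped in an InvocationTargetException. The self-contained sketch below reproduces only that wrapping behaviour with a stand-in class; it does not use the Hadoop API.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveCallSketch {
    // Stand-in for a filesystem whose client has already been closed.
    static class FakeFs {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        FakeFs fs = new FakeFs();
        Method m = FakeFs.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(fs, "/some/wal/file");
        } catch (InvocationTargetException e) {
            // Reflection wraps the checked IOException, which is why the WARN above
            // shows InvocationTargetException with "Filesystem closed" as the cause.
            System.out.println("cause: " + e.getCause());
        }
    }
}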
2024-11-16T12:50:20,492 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T12:50:20,492 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T12:50:20,492 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T12:50:20,492 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T12:50:20,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T12:50:20,494 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T12:50:20,494 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:50:20,495 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:50:20,495 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T12:50:20,495 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T12:50:20,495 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:50:20,496 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:50:20,496 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T12:50:20,497 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T12:50:20,497 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:50:20,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T12:50:20,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T12:50:20,498 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T12:50:20,498 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T12:50:20,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
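[Editor's note] The CompactionConfiguration lines above report the selection parameters (ratio 1.2, minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10) used by the ratio-based/exploring compaction policies. The sketch below is a deliberately simplified illustration of the ratio rule only: drop the oldest candidate while it is disproportionately larger than the newer files after it. It is not ExploringCompactionPolicy or RatioBasedCompactionPolicy code.

import java.util.ArrayList;
import java.util.List;

public class RatioCompactionSketch {
    // Keep the oldest file only if it is either below minCompactSize or no larger
    // than ratio times the combined size of the newer files; otherwise exclude it
    // and re-check the remaining set.
    static List<Long> select(List<Long> sizesOldestFirst, double ratio, long minCompactSize) {
        List<Long> picked = new ArrayList<>(sizesOldestFirst);
        while (!picked.isEmpty()) {
            long first = picked.get(0);
            long restSum = picked.stream().skip(1).mapToLong(Long::longValue).sum();
            if (first <= minCompactSize || first <= ratio * restSum) {
                break;            // oldest file is in proportion; keep the set
            }
            picked.remove(0);     // oldest file is too large; exclude and retry
        }
        return picked;
    }

    public static void main(String[] args) {
        long mb = 1024L * 1024;
        // The 900 MB file is dropped; the remaining files are within 1.2x of their peers.
        System.out.println(select(List.of(900 * mb, 100 * mb, 90 * mb, 80 * mb), 1.2, 128 * mb));
    }
}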
2024-11-16T12:50:20,498 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T12:50:20,499 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/data/hbase/meta/1588230740 2024-11-16T12:50:20,500 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/data/hbase/meta/1588230740 2024-11-16T12:50:20,501 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T12:50:20,501 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T12:50:20,501 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T12:50:20,502 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T12:50:20,503 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=740096, jitterRate=-0.05891953408718109}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T12:50:20,503 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T12:50:20,504 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731761420492Writing region info on filesystem at 1731761420492Initializing all the Stores at 1731761420493 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761420493Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761420493Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731761420493Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731761420493Cleaning up temporary data from old regions at 1731761420501 (+8 ms)Running coprocessor post-open hooks at 1731761420503 (+2 ms)Region opened successfully at 1731761420503 2024-11-16T12:50:20,504 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731761420471 2024-11-16T12:50:20,507 DEBUG [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T12:50:20,507 INFO [RS_OPEN_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T12:50:20,507 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0450ab8807f5,43383,1731761419478 2024-11-16T12:50:20,508 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0450ab8807f5,43383,1731761419478, state=OPEN 2024-11-16T12:50:20,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T12:50:20,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T12:50:20,566 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0450ab8807f5,43383,1731761419478 2024-11-16T12:50:20,567 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:50:20,567 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T12:50:20,569 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T12:50:20,569 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0450ab8807f5,43383,1731761419478 in 249 msec 2024-11-16T12:50:20,571 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T12:50:20,571 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 734 msec 2024-11-16T12:50:20,572 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T12:50:20,572 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T12:50:20,573 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T12:50:20,573 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0450ab8807f5,43383,1731761419478, seqNum=-1] 2024-11-16T12:50:20,573 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T12:50:20,574 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44991, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T12:50:20,579 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 777 msec 2024-11-16T12:50:20,579 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731761420579, completionTime=-1 2024-11-16T12:50:20,579 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T12:50:20,579 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-16T12:50:20,581 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-16T12:50:20,581 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731761480581 2024-11-16T12:50:20,581 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731761540581 2024-11-16T12:50:20,581 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-16T12:50:20,581 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,40947,1731761419269-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T12:50:20,581 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,40947,1731761419269-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:50:20,581 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,40947,1731761419269-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T12:50:20,582 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0450ab8807f5:40947, period=300000, unit=MILLISECONDS is enabled. 
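[Editor's note] The ChoreService lines above register periodic tasks (BalancerChore period=300000 ms, ClusterStatusChore period=60000 ms, and so on). The sketch below shows the same idea with plain java.util.concurrent scheduling; it is a generic stand-in, not HBase's ScheduledChore/ChoreService, and the one-second demo period is chosen only so the output is observable.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
    public static void main(String[] args) throws Exception {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        Runnable balancerChore = () -> System.out.println("balancer chore tick");
        // In the log this would be period=300000, unit=MILLISECONDS; 1 second here for demo.
        pool.scheduleAtFixedRate(balancerChore, 0, 1, TimeUnit.SECONDS);
        TimeUnit.SECONDS.sleep(3);
        pool.shutdownNow(); // the real service is stopped when the master shuts down
    }
}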
2024-11-16T12:50:20,582 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T12:50:20,582 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T12:50:20,584 DEBUG [master/0450ab8807f5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T12:50:20,585 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.021sec 2024-11-16T12:50:20,585 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T12:50:20,585 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T12:50:20,585 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T12:50:20,585 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T12:50:20,586 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T12:50:20,586 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,40947,1731761419269-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T12:50:20,586 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,40947,1731761419269-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T12:50:20,587 DEBUG [master/0450ab8807f5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T12:50:20,588 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T12:50:20,588 INFO [master/0450ab8807f5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0450ab8807f5,40947,1731761419269-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T12:50:20,615 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73237904, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:50:20,616 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0450ab8807f5,40947,-1 for getting cluster id 2024-11-16T12:50:20,616 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T12:50:20,617 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '517f81ec-742b-4953-a53c-224d20444b39' 2024-11-16T12:50:20,617 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T12:50:20,618 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "517f81ec-742b-4953-a53c-224d20444b39" 2024-11-16T12:50:20,618 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ba6c542, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:50:20,618 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0450ab8807f5,40947,-1] 2024-11-16T12:50:20,618 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T12:50:20,619 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:50:20,620 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51516, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T12:50:20,620 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@338da4b3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T12:50:20,621 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T12:50:20,622 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0450ab8807f5,43383,1731761419478, seqNum=-1] 2024-11-16T12:50:20,622 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T12:50:20,623 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35840, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T12:50:20,625 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0450ab8807f5,40947,1731761419269 2024-11-16T12:50:20,625 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T12:50:20,628 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T12:50:20,628 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T12:50:20,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,33749,1731761229240/0450ab8807f5%2C33749%2C1731761229240.meta.1731761230290.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T12:50:20,630 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/WALs/test.com,8080,1, archiveDir=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/oldWALs, maxLogs=32 2024-11-16T12:50:20,631 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731761420631 2024-11-16T12:50:20,636 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/WALs/test.com,8080,1/test.com%2C8080%2C1.1731761420631 2024-11-16T12:50:20,636 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37489:37489),(127.0.0.1/127.0.0.1:33411:33411)] 2024-11-16T12:50:20,639 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731761420639 2024-11-16T12:50:20,648 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,648 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,648 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,648 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,648 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,648 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/WALs/test.com,8080,1/test.com%2C8080%2C1.1731761420631 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/WALs/test.com,8080,1/test.com%2C8080%2C1.1731761420639 2024-11-16T12:50:20,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741835_1011 (size=93) 2024-11-16T12:50:20,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741835_1011 (size=93) 2024-11-16T12:50:20,659 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/WALs/test.com,8080,1/test.com%2C8080%2C1.1731761420631 to hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/oldWALs/test.com%2C8080%2C1.1731761420631 2024-11-16T12:50:20,661 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37489:37489),(127.0.0.1/127.0.0.1:33411:33411)] 2024-11-16T12:50:20,661 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,662 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,662 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,662 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,662 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741836_1012 (size=93) 2024-11-16T12:50:20,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741836_1012 (size=93) 2024-11-16T12:50:20,665 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): 
Moved 1 WAL file(s) to /user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/oldWALs 2024-11-16T12:50:20,665 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731761420639) 2024-11-16T12:50:20,665 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T12:50:20,666 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T12:50:20,666 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:50:20,666 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:50:20,666 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:50:20,666 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T12:50:20,666 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T12:50:20,666 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1056006682, stopped=false 2024-11-16T12:50:20,666 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0450ab8807f5,40947,1731761419269 2024-11-16T12:50:20,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T12:50:20,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T12:50:20,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:20,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:20,714 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T12:50:20,714 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
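[Editor's note] The shutdown stack above runs through AbstractTestLogRolling.tearDown and HBaseTestingUtil.shutdownMiniCluster. The skeleton below sketches that test lifecycle under stated assumptions: shutdownMiniCluster is taken from the stack trace, while the no-arg constructor and startMiniCluster are assumed from the usual HBaseTestingUtil API; this is not the actual AbstractTestLogRolling class.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class LogRollingLifecycleSketch {
    private final HBaseTestingUtil util = new HBaseTestingUtil();

    @Before
    public void setUp() throws Exception {
        util.startMiniCluster();      // brings up HDFS, ZooKeeper, master and one region server
    }

    @Test
    public void testAgainstTheCluster() throws Exception {
        // test body: create tables, write data, roll and archive WALs, etc.
    }

    @After
    public void tearDown() throws Exception {
        util.shutdownMiniCluster();   // produces the "Shutting down minicluster" lines above
    }
}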
2024-11-16T12:50:20,715 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:50:20,715 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:50:20,715 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:50:20,715 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0450ab8807f5,43383,1731761419478' ***** 2024-11-16T12:50:20,715 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T12:50:20,715 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T12:50:20,715 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T12:50:20,715 INFO [RS:0;0450ab8807f5:43383 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T12:50:20,715 INFO [RS:0;0450ab8807f5:43383 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T12:50:20,715 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.HRegionServer(959): stopping server 0450ab8807f5,43383,1731761419478 2024-11-16T12:50:20,715 INFO [RS:0;0450ab8807f5:43383 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T12:50:20,716 INFO [RS:0;0450ab8807f5:43383 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0450ab8807f5:43383. 2024-11-16T12:50:20,716 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T12:50:20,716 DEBUG [RS:0;0450ab8807f5:43383 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T12:50:20,716 DEBUG [RS:0;0450ab8807f5:43383 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:50:20,716 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T12:50:20,716 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T12:50:20,716 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-16T12:50:20,716 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T12:50:20,716 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-16T12:50:20,716 DEBUG [RS:0;0450ab8807f5:43383 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-16T12:50:20,717 DEBUG [RS:0;0450ab8807f5:43383 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T12:50:20,717 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T12:50:20,717 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T12:50:20,717 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T12:50:20,717 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T12:50:20,717 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T12:50:20,717 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-16T12:50:20,739 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/data/hbase/meta/1588230740/.tmp/ns/b79d166ec96442b3a1be1b6448e705b6 is 43, key is default/ns:d/1731761420575/Put/seqid=0 2024-11-16T12:50:20,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741837_1013 (size=5153) 2024-11-16T12:50:20,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741837_1013 (size=5153) 2024-11-16T12:50:20,744 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/data/hbase/meta/1588230740/.tmp/ns/b79d166ec96442b3a1be1b6448e705b6 2024-11-16T12:50:20,749 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/data/hbase/meta/1588230740/.tmp/ns/b79d166ec96442b3a1be1b6448e705b6 as hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/data/hbase/meta/1588230740/ns/b79d166ec96442b3a1be1b6448e705b6 2024-11-16T12:50:20,757 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/data/hbase/meta/1588230740/ns/b79d166ec96442b3a1be1b6448e705b6, entries=2, sequenceid=6, filesize=5.0 K 2024-11-16T12:50:20,758 INFO 
[RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 41ms, sequenceid=6, compaction requested=false 2024-11-16T12:50:20,758 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T12:50:20,769 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T12:50:20,769 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T12:50:20,769 INFO [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T12:50:20,770 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731761420717Running coprocessor pre-close hooks at 1731761420717Disabling compacts and flushes for region at 1731761420717Disabling writes for close at 1731761420717Obtaining lock to block concurrent updates at 1731761420717Preparing flush snapshotting stores in 1588230740 at 1731761420717Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731761420717Flushing stores of hbase:meta,,1.1588230740 at 1731761420718 (+1 ms)Flushing 1588230740/ns: creating writer at 1731761420718Flushing 1588230740/ns: appending metadata at 1731761420738 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1731761420738Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1cbd8ca3: reopening flushed file at 1731761420748 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 41ms, sequenceid=6, compaction requested=false at 1731761420758 (+10 ms)Writing region close event to WAL at 1731761420759 (+1 ms)Running coprocessor post-close hooks at 1731761420769 (+10 ms)Closed at 1731761420769 2024-11-16T12:50:20,770 DEBUG [RS_CLOSE_META-regionserver/0450ab8807f5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T12:50:20,917 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.HRegionServer(976): stopping server 0450ab8807f5,43383,1731761419478; all regions closed. 
2024-11-16T12:50:20,917 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,917 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,918 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,918 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,918 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741834_1010 (size=1152) 2024-11-16T12:50:20,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741834_1010 (size=1152) 2024-11-16T12:50:20,921 DEBUG [RS:0;0450ab8807f5:43383 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/oldWALs 2024-11-16T12:50:20,921 INFO [RS:0;0450ab8807f5:43383 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0450ab8807f5%2C43383%2C1731761419478.meta:.meta(num 1731761420477) 2024-11-16T12:50:20,922 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,922 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,922 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,922 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,922 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:20,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741833_1009 (size=93) 2024-11-16T12:50:20,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741833_1009 (size=93) 2024-11-16T12:50:20,925 DEBUG [RS:0;0450ab8807f5:43383 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/oldWALs 2024-11-16T12:50:20,925 INFO [RS:0;0450ab8807f5:43383 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0450ab8807f5%2C43383%2C1731761419478:(num 1731761419996) 2024-11-16T12:50:20,925 DEBUG [RS:0;0450ab8807f5:43383 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T12:50:20,925 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T12:50:20,925 INFO [RS:0;0450ab8807f5:43383 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T12:50:20,926 INFO [RS:0;0450ab8807f5:43383 {}] hbase.ChoreService(370): Chore service for: regionserver/0450ab8807f5:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-16T12:50:20,926 INFO [RS:0;0450ab8807f5:43383 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T12:50:20,926 INFO [regionserver/0450ab8807f5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T12:50:20,926 INFO [RS:0;0450ab8807f5:43383 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43383 2024-11-16T12:50:20,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T12:50:20,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0450ab8807f5,43383,1731761419478 2024-11-16T12:50:20,959 INFO [RS:0;0450ab8807f5:43383 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T12:50:21,022 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0450ab8807f5,43383,1731761419478] 2024-11-16T12:50:21,073 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0450ab8807f5,43383,1731761419478 already deleted, retry=false 2024-11-16T12:50:21,073 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0450ab8807f5,43383,1731761419478 expired; onlineServers=0 2024-11-16T12:50:21,074 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0450ab8807f5,40947,1731761419269' ***** 2024-11-16T12:50:21,074 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T12:50:21,074 INFO [M:0;0450ab8807f5:40947 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T12:50:21,074 INFO [M:0;0450ab8807f5:40947 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T12:50:21,074 DEBUG [M:0;0450ab8807f5:40947 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T12:50:21,074 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T12:50:21,074 DEBUG [M:0;0450ab8807f5:40947 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T12:50:21,074 DEBUG [master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761419806 {}] cleaner.HFileCleaner(306): Exit Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.small.0-1731761419806,5,FailOnTimeoutGroup] 2024-11-16T12:50:21,074 DEBUG [master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761419805 {}] cleaner.HFileCleaner(306): Exit Thread[master/0450ab8807f5:0:becomeActiveMaster-HFileCleaner.large.0-1731761419805,5,FailOnTimeoutGroup] 2024-11-16T12:50:21,075 INFO [M:0;0450ab8807f5:40947 {}] hbase.ChoreService(370): Chore service for: master/0450ab8807f5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T12:50:21,075 INFO [M:0;0450ab8807f5:40947 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T12:50:21,075 DEBUG [M:0;0450ab8807f5:40947 {}] master.HMaster(1795): Stopping service threads 2024-11-16T12:50:21,075 INFO [M:0;0450ab8807f5:40947 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T12:50:21,075 INFO [M:0;0450ab8807f5:40947 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T12:50:21,076 INFO [M:0;0450ab8807f5:40947 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T12:50:21,076 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T12:50:21,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T12:50:21,115 DEBUG [M:0;0450ab8807f5:40947 {}] zookeeper.ZKUtil(347): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T12:50:21,115 WARN [M:0;0450ab8807f5:40947 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T12:50:21,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T12:50:21,116 INFO [M:0;0450ab8807f5:40947 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/.lastflushedseqids 2024-11-16T12:50:21,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:50:21,123 INFO [RS:0;0450ab8807f5:43383 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T12:50:21,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43383-0x10144fbc6b50001, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:50:21,123 INFO [RS:0;0450ab8807f5:43383 {}] regionserver.HRegionServer(1031): Exiting; stopping=0450ab8807f5,43383,1731761419478; zookeeper connection closed. 
2024-11-16T12:50:21,123 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5eb905de {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5eb905de 2024-11-16T12:50:21,123 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T12:50:21,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741838_1014 (size=99) 2024-11-16T12:50:21,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741838_1014 (size=99) 2024-11-16T12:50:21,125 INFO [M:0;0450ab8807f5:40947 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T12:50:21,125 INFO [M:0;0450ab8807f5:40947 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T12:50:21,125 DEBUG [M:0;0450ab8807f5:40947 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T12:50:21,125 INFO [M:0;0450ab8807f5:40947 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:50:21,125 DEBUG [M:0;0450ab8807f5:40947 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:50:21,125 DEBUG [M:0;0450ab8807f5:40947 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T12:50:21,125 DEBUG [M:0;0450ab8807f5:40947 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T12:50:21,125 INFO [M:0;0450ab8807f5:40947 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-16T12:50:21,140 DEBUG [M:0;0450ab8807f5:40947 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/46fb43a9d2ae4a3fb83f547da4c12c39 is 82, key is hbase:meta,,1/info:regioninfo/1731761420507/Put/seqid=0 2024-11-16T12:50:21,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741839_1015 (size=5672) 2024-11-16T12:50:21,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741839_1015 (size=5672) 2024-11-16T12:50:21,145 INFO [M:0;0450ab8807f5:40947 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/46fb43a9d2ae4a3fb83f547da4c12c39 2024-11-16T12:50:21,163 DEBUG [M:0;0450ab8807f5:40947 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e4fbdc4c2b5b4f09b8d6f96b6a353212 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731761420578/Put/seqid=0 2024-11-16T12:50:21,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741840_1016 (size=5275) 2024-11-16T12:50:21,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741840_1016 (size=5275) 2024-11-16T12:50:21,168 INFO [M:0;0450ab8807f5:40947 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e4fbdc4c2b5b4f09b8d6f96b6a353212 2024-11-16T12:50:21,185 DEBUG [M:0;0450ab8807f5:40947 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bd1b4dd2cb0945fbb9b4a9292f85b651 is 69, key is 0450ab8807f5,43383,1731761419478/rs:state/1731761419847/Put/seqid=0 2024-11-16T12:50:21,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741841_1017 (size=5156) 2024-11-16T12:50:21,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741841_1017 (size=5156) 2024-11-16T12:50:21,190 INFO [M:0;0450ab8807f5:40947 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bd1b4dd2cb0945fbb9b4a9292f85b651 2024-11-16T12:50:21,206 DEBUG [M:0;0450ab8807f5:40947 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bd661dfe81af46acb7e52bb3117266a0 is 52, key is load_balancer_on/state:d/1731761420627/Put/seqid=0 2024-11-16T12:50:21,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741842_1018 (size=5056) 2024-11-16T12:50:21,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741842_1018 (size=5056) 2024-11-16T12:50:21,211 INFO [M:0;0450ab8807f5:40947 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bd661dfe81af46acb7e52bb3117266a0 2024-11-16T12:50:21,215 DEBUG [M:0;0450ab8807f5:40947 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/46fb43a9d2ae4a3fb83f547da4c12c39 as hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/46fb43a9d2ae4a3fb83f547da4c12c39 2024-11-16T12:50:21,219 INFO [M:0;0450ab8807f5:40947 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/46fb43a9d2ae4a3fb83f547da4c12c39, entries=8, sequenceid=29, filesize=5.5 K 2024-11-16T12:50:21,220 DEBUG [M:0;0450ab8807f5:40947 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e4fbdc4c2b5b4f09b8d6f96b6a353212 as hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e4fbdc4c2b5b4f09b8d6f96b6a353212 2024-11-16T12:50:21,223 INFO [M:0;0450ab8807f5:40947 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e4fbdc4c2b5b4f09b8d6f96b6a353212, entries=3, sequenceid=29, filesize=5.2 K 2024-11-16T12:50:21,224 DEBUG [M:0;0450ab8807f5:40947 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bd1b4dd2cb0945fbb9b4a9292f85b651 as hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bd1b4dd2cb0945fbb9b4a9292f85b651 2024-11-16T12:50:21,227 INFO [M:0;0450ab8807f5:40947 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bd1b4dd2cb0945fbb9b4a9292f85b651, entries=1, sequenceid=29, filesize=5.0 K 2024-11-16T12:50:21,228 DEBUG [M:0;0450ab8807f5:40947 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bd661dfe81af46acb7e52bb3117266a0 as hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bd661dfe81af46acb7e52bb3117266a0 2024-11-16T12:50:21,231 INFO [M:0;0450ab8807f5:40947 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/e9988f0c-fbf5-7de2-dac4-f0429edf6801/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bd661dfe81af46acb7e52bb3117266a0, entries=1, sequenceid=29, filesize=4.9 K 2024-11-16T12:50:21,232 INFO [M:0;0450ab8807f5:40947 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 107ms, sequenceid=29, compaction requested=false 2024-11-16T12:50:21,233 INFO [M:0;0450ab8807f5:40947 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T12:50:21,233 DEBUG [M:0;0450ab8807f5:40947 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731761421125Disabling compacts and flushes for region at 1731761421125Disabling writes for close at 1731761421125Obtaining lock to block concurrent updates at 1731761421125Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731761421125Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731761421126 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731761421126Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731761421126Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731761421140 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731761421140Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731761421149 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731761421163 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731761421163Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731761421172 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731761421184 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731761421184Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731761421193 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731761421206 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731761421206Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5279bc7a: reopening flushed file at 1731761421215 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1dc4ad7f: reopening flushed file at 1731761421219 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@671a7afa: reopening flushed file at 1731761421224 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76bff173: reopening flushed file at 1731761421227 (+3 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 107ms, sequenceid=29, compaction requested=false at 1731761421232 (+5 ms)Writing region close event to WAL at 1731761421233 (+1 ms)Closed at 1731761421233 2024-11-16T12:50:21,234 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:21,234 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:21,234 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:21,234 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:21,234 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T12:50:21,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40507 is added to blk_1073741830_1006 (size=10311) 2024-11-16T12:50:21,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44511 is added to blk_1073741830_1006 (size=10311) 2024-11-16T12:50:21,236 INFO [M:0;0450ab8807f5:40947 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T12:50:21,236 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T12:50:21,236 INFO [M:0;0450ab8807f5:40947 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40947 2024-11-16T12:50:21,237 INFO [M:0;0450ab8807f5:40947 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T12:50:21,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:50:21,388 INFO [M:0;0450ab8807f5:40947 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T12:50:21,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40947-0x10144fbc6b50000, quorum=127.0.0.1:62323, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T12:50:21,390 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b2e282d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:50:21,391 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@31bc17f3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:50:21,391 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:50:21,391 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a03636e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:50:21,391 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59dd2dec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/hadoop.log.dir/,STOPPED} 2024-11-16T12:50:21,392 WARN [BP-2127107745-172.17.0.2-1731761417512 heartbeating to localhost/127.0.0.1:44067 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:50:21,392 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T12:50:21,392 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:50:21,392 WARN [BP-2127107745-172.17.0.2-1731761417512 heartbeating to localhost/127.0.0.1:44067 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2127107745-172.17.0.2-1731761417512 (Datanode Uuid b5667f6d-ec32-4913-aa7f-9afeb4a617c3) service to localhost/127.0.0.1:44067 2024-11-16T12:50:21,392 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/cluster_ba51f5e4-209e-d688-5b2e-a309fa8086a8/data/data3/current/BP-2127107745-172.17.0.2-1731761417512 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:50:21,393 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/cluster_ba51f5e4-209e-d688-5b2e-a309fa8086a8/data/data4/current/BP-2127107745-172.17.0.2-1731761417512 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:50:21,393 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:50:21,396 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c9b811e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T12:50:21,397 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@548e17de{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:50:21,397 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:50:21,397 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ae1ce13{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:50:21,397 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61ab51b1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/hadoop.log.dir/,STOPPED} 2024-11-16T12:50:21,398 WARN [BP-2127107745-172.17.0.2-1731761417512 heartbeating to localhost/127.0.0.1:44067 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T12:50:21,398 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T12:50:21,398 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T12:50:21,398 WARN [BP-2127107745-172.17.0.2-1731761417512 heartbeating to localhost/127.0.0.1:44067 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2127107745-172.17.0.2-1731761417512 (Datanode Uuid 290d6b3c-8743-4b3e-ba66-ad240659567d) service to localhost/127.0.0.1:44067 2024-11-16T12:50:21,398 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/cluster_ba51f5e4-209e-d688-5b2e-a309fa8086a8/data/data1/current/BP-2127107745-172.17.0.2-1731761417512 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:50:21,398 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/cluster_ba51f5e4-209e-d688-5b2e-a309fa8086a8/data/data2/current/BP-2127107745-172.17.0.2-1731761417512 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T12:50:21,399 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T12:50:21,404 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5fea8446{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T12:50:21,405 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@413a6699{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T12:50:21,405 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T12:50:21,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b7a9b88{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T12:50:21,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17951be7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0da54629-5700-375f-9042-9db10827b77c/hadoop.log.dir/,STOPPED} 2024-11-16T12:50:21,411 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T12:50:21,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39393/user/jenkins/test-data/22f6e026-0b5c-511b-1a71-886abf60d628/WALs/0450ab8807f5,35663,1731761230502/0450ab8807f5%2C35663%2C1731761230502.1731761230742 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T12:50:21,442 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T12:50:21,453 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=271 (was 232) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44067 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44067 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) 
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44067 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44067 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: regionserver/0450ab8807f5:0.leaseChecker
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82)

Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44067
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44067 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44067
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:44067
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

 - Thread LEAK? -, OpenFileDescriptor=537 (was 514) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=193 (was 175) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3761 (was 4051)