2024-11-16 20:33:38,884 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-16 20:33:38,901 main DEBUG Took 0.014881 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-16 20:33:38,901 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-16 20:33:38,902 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-16 20:33:38,903 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-16 20:33:38,904 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 20:33:38,910 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-16 20:33:38,922 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 20:33:38,924 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 20:33:38,925 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 20:33:38,925 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 20:33:38,926 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 20:33:38,926 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 20:33:38,927 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 20:33:38,928 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 20:33:38,929 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 20:33:38,929 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 20:33:38,930 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 20:33:38,930 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 20:33:38,931 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 20:33:38,931 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-16 20:33:38,932 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 20:33:38,932 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 20:33:38,933 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 20:33:38,933 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 20:33:38,933 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 20:33:38,934 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 20:33:38,934 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 20:33:38,935 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 20:33:38,935 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 20:33:38,935 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 20:33:38,936 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 20:33:38,936 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-16 20:33:38,938 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 20:33:38,940 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-16 20:33:38,942 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-16 20:33:38,943 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
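The records above show Log4j 2 building one LoggerConfig per package from the bundled log4j2.properties: for example org.apache.hadoop at WARN, org.apache.hadoop.hbase at DEBUG, org.apache.zookeeper at ERROR, and an INFO root routed to the Console appender. As a minimal sketch only, assuming the standard Log4j 2 Configurator API (the test run itself gets these levels from the properties file, not from code), the same levels could be set programmatically:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public final class TestLogLevelsSketch {
        public static void main(String[] args) {
            // Root stays at INFO, mirroring the root logger "levelAndRefs=INFO,Console" above.
            Configurator.setRootLevel(Level.INFO);
            // Per-package levels mirroring the LoggerConfig builders in the log.
            Configurator.setLevel("org.apache.hadoop", Level.WARN);
            Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
            Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
            Configurator.setLevel("org.apache.hbase.thirdparty.io.netty.channel", Level.DEBUG);
        }
    }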
2024-11-16 20:33:38,944 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-16 20:33:38,944 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-16 20:33:38,955 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-16 20:33:38,958 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-16 20:33:38,960 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-16 20:33:38,961 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-16 20:33:38,961 main DEBUG createAppenders(={Console}) 2024-11-16 20:33:38,962 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-16 20:33:38,963 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-16 20:33:38,963 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-16 20:33:38,964 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-16 20:33:38,964 main DEBUG OutputStream closed 2024-11-16 20:33:38,965 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-16 20:33:38,965 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-16 20:33:38,965 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-16 20:33:39,050 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-16 20:33:39,052 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-16 20:33:39,053 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-16 20:33:39,054 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-16 20:33:39,055 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-16 20:33:39,055 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-16 20:33:39,055 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-16 20:33:39,056 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-16 20:33:39,056 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-16 20:33:39,056 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-16 20:33:39,057 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-16 20:33:39,057 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-16 20:33:39,057 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-16 20:33:39,058 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-16 20:33:39,058 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-16 20:33:39,058 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-16 20:33:39,058 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-16 20:33:39,059 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-16 20:33:39,061 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-16 20:33:39,061 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-16 20:33:39,062 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-16 20:33:39,062 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-16T20:33:39,329 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9 2024-11-16 20:33:39,332 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-16 20:33:39,332 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
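After reconfiguration completes, Log4j 2 registers one MBean per logger and appender under the org.apache.logging.log4j2 JMX domain (the type=1dbd16a6 names above). A small, hypothetical way to list those MBeans from inside the same JVM, using only standard JDK JMX calls (nothing here is taken from the test code):

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public final class ListLog4jMBeansSketch {
        public static void main(String[] args) throws Exception {
            MBeanServer server = ManagementFactory.getPlatformMBeanServer();
            // Matches every Log4j 2 MBean in this JVM, e.g.
            // org.apache.logging.log4j2:type=<contextId>,component=Loggers,name=org.apache.hadoop.hbase
            ObjectName pattern = new ObjectName("org.apache.logging.log4j2:*");
            for (ObjectName name : server.queryNames(pattern, null)) {
                System.out.println(name);
            }
        }
    }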
2024-11-16T20:33:39,340 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-16T20:33:39,374 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=125, ProcessCount=11, AvailableMemoryMB=5442 2024-11-16T20:33:39,377 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T20:33:39,391 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/cluster_e01d7346-7a30-214e-8c60-80b6047028f5, deleteOnExit=true 2024-11-16T20:33:39,391 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T20:33:39,392 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/test.cache.data in system properties and HBase conf 2024-11-16T20:33:39,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T20:33:39,394 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/hadoop.log.dir in system properties and HBase conf 2024-11-16T20:33:39,394 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T20:33:39,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T20:33:39,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T20:33:39,490 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-16T20:33:39,593 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T20:33:39,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T20:33:39,600 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T20:33:39,600 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T20:33:39,601 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T20:33:39,602 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T20:33:39,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T20:33:39,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T20:33:39,604 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T20:33:39,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T20:33:39,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/nfs.dump.dir in system properties and HBase conf 2024-11-16T20:33:39,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/java.io.tmpdir in system properties and HBase conf 2024-11-16T20:33:39,607 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T20:33:39,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T20:33:39,609 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T20:33:40,098 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T20:33:40,986 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-16T20:33:41,058 INFO [Time-limited test {}] log.Log(170): Logging initialized @2889ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-16T20:33:41,129 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:33:41,200 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:33:41,235 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:33:41,236 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:33:41,238 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T20:33:41,259 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:33:41,263 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:33:41,264 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:33:41,476 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/java.io.tmpdir/jetty-localhost-44479-hadoop-hdfs-3_4_1-tests_jar-_-any-12502703449229020784/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T20:33:41,481 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:44479} 2024-11-16T20:33:41,482 INFO [Time-limited test {}] server.Server(415): Started @3314ms 2024-11-16T20:33:41,505 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T20:33:42,079 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:33:42,087 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:33:42,088 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:33:42,088 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:33:42,088 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:33:42,089 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6355b7f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:33:42,090 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d13ec7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:33:42,190 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c2fdbac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/java.io.tmpdir/jetty-localhost-33433-hadoop-hdfs-3_4_1-tests_jar-_-any-10093553649746292465/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:33:42,192 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@461c65fd{HTTP/1.1, (http/1.1)}{localhost:33433} 2024-11-16T20:33:42,192 INFO [Time-limited test {}] server.Server(415): Started @4024ms 2024-11-16T20:33:42,258 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:33:42,362 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:33:42,369 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:33:42,371 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:33:42,371 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:33:42,371 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T20:33:42,372 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3369fbc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:33:42,373 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ec7bf2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:33:42,487 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1467625d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/java.io.tmpdir/jetty-localhost-46213-hadoop-hdfs-3_4_1-tests_jar-_-any-17594391299096332402/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:33:42,488 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@675921ed{HTTP/1.1, (http/1.1)}{localhost:46213} 2024-11-16T20:33:42,488 INFO [Time-limited test {}] server.Server(415): Started @4321ms 2024-11-16T20:33:42,491 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
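Everything from "Starting up minicluster" onward is HBaseTestingUtil bringing up a one-master, one-regionserver cluster on top of a two-datanode HDFS mini-cluster and a single ZooKeeper server, per the StartMiniClusterOption printed above. A minimal sketch of a test doing the equivalent, assuming the StartMiniClusterOption builder methods mirror the option fields shown in that log line:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtil util = new HBaseTestingUtil();
            // Mirrors the option logged above: one master, one region server,
            // two HDFS datanodes, one ZooKeeper server.
            StartMiniClusterOption option = StartMiniClusterOption.builder()
                .numMasters(1)
                .numRegionServers(1)
                .numDataNodes(2)
                .numZkServers(1)
                .build();
            util.startMiniCluster(option);
            try {
                // ... test logic would run against the mini cluster here ...
            } finally {
                util.shutdownMiniCluster();
            }
        }
    }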
2024-11-16T20:33:43,809 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/cluster_e01d7346-7a30-214e-8c60-80b6047028f5/data/data3/current/BP-1072893873-172.17.0.2-1731789220193/current, will proceed with Du for space computation calculation, 2024-11-16T20:33:43,809 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/cluster_e01d7346-7a30-214e-8c60-80b6047028f5/data/data1/current/BP-1072893873-172.17.0.2-1731789220193/current, will proceed with Du for space computation calculation, 2024-11-16T20:33:43,809 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/cluster_e01d7346-7a30-214e-8c60-80b6047028f5/data/data2/current/BP-1072893873-172.17.0.2-1731789220193/current, will proceed with Du for space computation calculation, 2024-11-16T20:33:43,809 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/cluster_e01d7346-7a30-214e-8c60-80b6047028f5/data/data4/current/BP-1072893873-172.17.0.2-1731789220193/current, will proceed with Du for space computation calculation, 2024-11-16T20:33:43,848 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T20:33:43,848 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T20:33:43,896 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9c211913e7d7e09f with lease ID 0x2ac391fba4ce0124: Processing first storage report for DS-051aaca9-c637-4f47-8502-a207905dede7 from datanode DatanodeRegistration(127.0.0.1:34255, datanodeUuid=c774aad7-0834-41ec-a4bb-98fd9c87ad13, infoPort=34099, infoSecurePort=0, ipcPort=34085, storageInfo=lv=-57;cid=testClusterID;nsid=1234331525;c=1731789220193) 2024-11-16T20:33:43,897 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9c211913e7d7e09f with lease ID 0x2ac391fba4ce0124: from storage DS-051aaca9-c637-4f47-8502-a207905dede7 node DatanodeRegistration(127.0.0.1:34255, datanodeUuid=c774aad7-0834-41ec-a4bb-98fd9c87ad13, infoPort=34099, infoSecurePort=0, ipcPort=34085, storageInfo=lv=-57;cid=testClusterID;nsid=1234331525;c=1731789220193), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T20:33:43,897 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x87296c1ff1375579 with lease ID 0x2ac391fba4ce0125: Processing first storage report for DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be from datanode DatanodeRegistration(127.0.0.1:41759, datanodeUuid=481a69a6-83e4-400d-b7e6-ca0715204381, infoPort=43385, infoSecurePort=0, ipcPort=46555, storageInfo=lv=-57;cid=testClusterID;nsid=1234331525;c=1731789220193) 2024-11-16T20:33:43,897 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x87296c1ff1375579 with lease ID 0x2ac391fba4ce0125: from storage DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be node DatanodeRegistration(127.0.0.1:41759, datanodeUuid=481a69a6-83e4-400d-b7e6-ca0715204381, infoPort=43385, infoSecurePort=0, ipcPort=46555, storageInfo=lv=-57;cid=testClusterID;nsid=1234331525;c=1731789220193), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:33:43,898 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9c211913e7d7e09f with lease ID 0x2ac391fba4ce0124: Processing first storage report for DS-4e9e9585-3438-4b1a-a52c-f020a3bb8e66 from datanode DatanodeRegistration(127.0.0.1:34255, datanodeUuid=c774aad7-0834-41ec-a4bb-98fd9c87ad13, infoPort=34099, infoSecurePort=0, ipcPort=34085, storageInfo=lv=-57;cid=testClusterID;nsid=1234331525;c=1731789220193) 2024-11-16T20:33:43,898 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9c211913e7d7e09f with lease ID 0x2ac391fba4ce0124: from storage DS-4e9e9585-3438-4b1a-a52c-f020a3bb8e66 node DatanodeRegistration(127.0.0.1:34255, datanodeUuid=c774aad7-0834-41ec-a4bb-98fd9c87ad13, infoPort=34099, infoSecurePort=0, ipcPort=34085, storageInfo=lv=-57;cid=testClusterID;nsid=1234331525;c=1731789220193), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:33:43,898 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x87296c1ff1375579 with lease ID 0x2ac391fba4ce0125: Processing first storage report for DS-138ff0d3-8526-4796-9fb4-b38758fe9e95 from datanode DatanodeRegistration(127.0.0.1:41759, datanodeUuid=481a69a6-83e4-400d-b7e6-ca0715204381, infoPort=43385, infoSecurePort=0, ipcPort=46555, storageInfo=lv=-57;cid=testClusterID;nsid=1234331525;c=1731789220193) 2024-11-16T20:33:43,898 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x87296c1ff1375579 with lease ID 0x2ac391fba4ce0125: from storage DS-138ff0d3-8526-4796-9fb4-b38758fe9e95 node DatanodeRegistration(127.0.0.1:41759, datanodeUuid=481a69a6-83e4-400d-b7e6-ca0715204381, infoPort=43385, infoSecurePort=0, ipcPort=46555, storageInfo=lv=-57;cid=testClusterID;nsid=1234331525;c=1731789220193), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:33:43,913 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9 2024-11-16T20:33:43,979 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/cluster_e01d7346-7a30-214e-8c60-80b6047028f5/zookeeper_0, clientPort=60663, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/cluster_e01d7346-7a30-214e-8c60-80b6047028f5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/cluster_e01d7346-7a30-214e-8c60-80b6047028f5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T20:33:43,988 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60663 2024-11-16T20:33:44,002 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:33:44,005 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:33:44,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741825_1001 (size=7) 2024-11-16T20:33:44,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741825_1001 (size=7) 2024-11-16T20:33:44,638 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc with version=8 2024-11-16T20:33:44,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/hbase-staging 2024-11-16T20:33:44,725 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-16T20:33:44,955 INFO [Time-limited test {}] client.ConnectionUtils(128): master/40c018648b21:0 server-side Connection retries=45 2024-11-16T20:33:44,963 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:33:44,964 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T20:33:44,968 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T20:33:44,968 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:33:44,968 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T20:33:45,092 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T20:33:45,147 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-16T20:33:45,156 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-16T20:33:45,159 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T20:33:45,182 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 102722 (auto-detected) 2024-11-16T20:33:45,182 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-16T20:33:45,199 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34669 2024-11-16T20:33:45,222 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34669 connecting to ZooKeeper ensemble=127.0.0.1:60663 2024-11-16T20:33:45,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:346690x0, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T20:33:45,297 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34669-0x101455a73c80000 connected 2024-11-16T20:33:45,385 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:33:45,389 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:33:45,401 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:33:45,406 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc, hbase.cluster.distributed=false 2024-11-16T20:33:45,431 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T20:33:45,439 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34669 2024-11-16T20:33:45,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34669 2024-11-16T20:33:45,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34669 2024-11-16T20:33:45,441 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34669 2024-11-16T20:33:45,441 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34669 2024-11-16T20:33:45,539 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/40c018648b21:0 server-side Connection retries=45 2024-11-16T20:33:45,541 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:33:45,541 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T20:33:45,541 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T20:33:45,541 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:33:45,541 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T20:33:45,544 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T20:33:45,547 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T20:33:45,547 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41609 2024-11-16T20:33:45,549 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41609 connecting to ZooKeeper ensemble=127.0.0.1:60663 2024-11-16T20:33:45,550 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:33:45,555 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:33:45,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:416090x0, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T20:33:45,571 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:416090x0, quorum=127.0.0.1:60663, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:33:45,571 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41609-0x101455a73c80001 connected 2024-11-16T20:33:45,575 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T20:33:45,584 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T20:33:45,586 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T20:33:45,592 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T20:33:45,593 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41609 2024-11-16T20:33:45,594 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41609 2024-11-16T20:33:45,594 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41609 2024-11-16T20:33:45,595 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41609 2024-11-16T20:33:45,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41609 2024-11-16T20:33:45,612 DEBUG [M:0;40c018648b21:34669 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;40c018648b21:34669 2024-11-16T20:33:45,613 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/40c018648b21,34669,1731789224801 2024-11-16T20:33:45,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:33:45,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:33:45,625 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/40c018648b21,34669,1731789224801 2024-11-16T20:33:45,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T20:33:45,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:33:45,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
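At this point the master has bound its RPC server on port 34669 and the region server on 41609, and both have set watchers on znodes such as /hbase/master, /hbase/acl and /hbase/backup-masters in the ZooKeeper ensemble at 127.0.0.1:60663. Once the master finishes activating, a client-side check of who holds the active-master role might look like the following sketch (the helper name and the use of ConnectionFactory are illustrative, not taken from the test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class ActiveMasterCheckSketch {
        // conf would be util.getConfiguration() from the mini-cluster sketch above.
        static void printActiveMaster(Configuration conf) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                ServerName master = admin.getClusterMetrics().getMasterName();
                // Should correspond to the "Registered as active master=..." record,
                // e.g. 40c018648b21,34669,1731789224801.
                System.out.println("active master: " + master);
            }
        }
    }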
2024-11-16T20:33:45,656 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T20:33:45,657 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/40c018648b21,34669,1731789224801 from backup master directory 2024-11-16T20:33:45,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/40c018648b21,34669,1731789224801 2024-11-16T20:33:45,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:33:45,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:33:45,666 WARN [master/40c018648b21:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T20:33:45,666 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=40c018648b21,34669,1731789224801 2024-11-16T20:33:45,668 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-16T20:33:45,669 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-16T20:33:45,722 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/hbase.id] with ID: f1d5dc12-4a5c-46aa-8a74-967dcbd7f113 2024-11-16T20:33:45,722 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/.tmp/hbase.id 2024-11-16T20:33:45,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741826_1002 (size=42) 2024-11-16T20:33:45,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741826_1002 (size=42) 2024-11-16T20:33:45,734 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/.tmp/hbase.id]:[hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/hbase.id] 2024-11-16T20:33:45,780 INFO [master/40c018648b21:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:33:45,785 INFO [master/40c018648b21:0:becomeActiveMaster {}] 
util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T20:33:45,803 INFO [master/40c018648b21:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-16T20:33:45,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:33:45,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:33:45,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741827_1003 (size=196) 2024-11-16T20:33:45,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741827_1003 (size=196) 2024-11-16T20:33:45,872 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T20:33:45,875 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T20:33:45,883 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:33:45,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741828_1004 (size=1189) 2024-11-16T20:33:45,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741828_1004 (size=1189) 2024-11-16T20:33:45,934 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY 
=> ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store 2024-11-16T20:33:45,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741829_1005 (size=34) 2024-11-16T20:33:45,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741829_1005 (size=34) 2024-11-16T20:33:45,958 INFO [master/40c018648b21:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-16T20:33:45,960 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:33:45,961 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T20:33:45,962 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:33:45,962 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:33:45,963 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T20:33:45,963 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:33:45,963 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
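The master region ('master:store') is created with the table descriptor printed above: an in-memory 'info' family with three versions, ROWCOL bloom filter, ROW_INDEX_V1 block encoding and an 8 KB block size, plus 'proc', 'rs' and 'state' families. A hedged sketch of building a descriptor with the same 'info' family attributes through the public client API (the table name 'example' is made up):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class StoreDescriptorSketch {
        static TableDescriptor build() {
            // 'info' family attributes as printed for master:store above:
            // VERSIONS=3, IN_MEMORY=true, BLOOMFILTER=ROWCOL,
            // DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192.
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setInMemory(true)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBlocksize(8192)
                    .build())
                .build();
        }
    }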
2024-11-16T20:33:45,965 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731789225961Disabling compacts and flushes for region at 1731789225961Disabling writes for close at 1731789225963 (+2 ms)Writing region close event to WAL at 1731789225963Closed at 1731789225963 2024-11-16T20:33:45,967 WARN [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/.initializing 2024-11-16T20:33:45,967 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/WALs/40c018648b21,34669,1731789224801 2024-11-16T20:33:45,989 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C34669%2C1731789224801, suffix=, logDir=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/WALs/40c018648b21,34669,1731789224801, archiveDir=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/oldWALs, maxLogs=10 2024-11-16T20:33:45,997 INFO [master/40c018648b21:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C34669%2C1731789224801.1731789225993 2024-11-16T20:33:46,015 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/WALs/40c018648b21,34669,1731789224801/40c018648b21%2C34669%2C1731789224801.1731789225993 2024-11-16T20:33:46,022 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43385:43385),(127.0.0.1/127.0.0.1:34099:34099)] 2024-11-16T20:33:46,023 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T20:33:46,024 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:33:46,028 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:33:46,030 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:33:46,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:33:46,091 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T20:33:46,094 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:33:46,097 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:33:46,097 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:33:46,100 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T20:33:46,100 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:33:46,102 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:33:46,102 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:33:46,105 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T20:33:46,105 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:33:46,106 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:33:46,106 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:33:46,109 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T20:33:46,109 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:33:46,110 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:33:46,110 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:33:46,114 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:33:46,115 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:33:46,120 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:33:46,120 DEBUG [master/40c018648b21:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:33:46,123 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T20:33:46,126 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:33:46,130 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:33:46,131 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805392, jitterRate=0.024109750986099243}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T20:33:46,139 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731789226045Initializing all the Stores at 1731789226047 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789226047Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789226048 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789226049 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789226049Cleaning up temporary data from old regions at 1731789226120 (+71 ms)Region opened successfully at 1731789226139 (+19 ms) 2024-11-16T20:33:46,141 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T20:33:46,174 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61e81f51, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=40c018648b21/172.17.0.2:0 2024-11-16T20:33:46,201 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T20:33:46,211 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T20:33:46,211 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T20:33:46,214 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T20:33:46,215 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-16T20:33:46,220 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-16T20:33:46,220 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T20:33:46,243 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T20:33:46,252 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T20:33:46,265 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T20:33:46,267 INFO [master/40c018648b21:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T20:33:46,269 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T20:33:46,275 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T20:33:46,278 INFO [master/40c018648b21:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T20:33:46,282 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T20:33:46,286 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T20:33:46,288 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T20:33:46,297 
DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T20:33:46,317 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T20:33:46,328 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T20:33:46,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T20:33:46,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T20:33:46,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:33:46,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:33:46,344 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=40c018648b21,34669,1731789224801, sessionid=0x101455a73c80000, setting cluster-up flag (Was=false) 2024-11-16T20:33:46,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:33:46,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:33:46,402 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T20:33:46,404 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=40c018648b21,34669,1731789224801 2024-11-16T20:33:46,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:33:46,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:33:46,455 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T20:33:46,460 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=40c018648b21,34669,1731789224801 2024-11-16T20:33:46,469 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T20:33:46,501 INFO [RS:0;40c018648b21:41609 {}] regionserver.HRegionServer(746): ClusterId : f1d5dc12-4a5c-46aa-8a74-967dcbd7f113 2024-11-16T20:33:46,503 DEBUG [RS:0;40c018648b21:41609 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T20:33:46,520 DEBUG [RS:0;40c018648b21:41609 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T20:33:46,520 DEBUG [RS:0;40c018648b21:41609 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T20:33:46,529 DEBUG [RS:0;40c018648b21:41609 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T20:33:46,530 DEBUG [RS:0;40c018648b21:41609 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43b17866, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=40c018648b21/172.17.0.2:0 2024-11-16T20:33:46,538 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T20:33:46,544 DEBUG [RS:0;40c018648b21:41609 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;40c018648b21:41609 2024-11-16T20:33:46,546 INFO [RS:0;40c018648b21:41609 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T20:33:46,547 INFO [master/40c018648b21:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T20:33:46,547 INFO [RS:0;40c018648b21:41609 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T20:33:46,547 DEBUG [RS:0;40c018648b21:41609 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T20:33:46,549 INFO [RS:0;40c018648b21:41609 {}] regionserver.HRegionServer(2659): reportForDuty to master=40c018648b21,34669,1731789224801 with port=41609, startcode=1731789225506 2024-11-16T20:33:46,552 INFO [master/40c018648b21:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
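The StochasticLoadBalancer line above echoes its tuning knobs (maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000, runMaxSteps=false). A minimal sketch of setting the same values programmatically; the configuration key names are the ones the balancer reads in recent HBase branches to the best of my knowledge (they are not printed in this log), and the class name is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values mirror what the balancer reported at startup above.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
      }
    }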
2024-11-16T20:33:46,558 DEBUG [RS:0;40c018648b21:41609 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T20:33:46,557 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 40c018648b21,34669,1731789224801 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T20:33:46,564 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:33:46,564 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:33:46,564 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:33:46,564 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:33:46,564 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/40c018648b21:0, corePoolSize=10, maxPoolSize=10 2024-11-16T20:33:46,564 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:33:46,565 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/40c018648b21:0, corePoolSize=2, maxPoolSize=2 2024-11-16T20:33:46,565 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:33:46,569 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731789256569 2024-11-16T20:33:46,570 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:33:46,571 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T20:33:46,571 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T20:33:46,572 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T20:33:46,576 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T20:33:46,576 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T20:33:46,577 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:33:46,577 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T20:33:46,577 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T20:33:46,577 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T20:33:46,578 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
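The cleaner initialization above wires TimeToLiveLogCleaner (and the other delegates listed) into the LogsCleaner chore, whose 600000 ms period happens to match the usual default WAL retention TTL. A small sketch of the corresponding settings, assuming the standard hbase.master.logcleaner.* keys (not printed in this log); the class name is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerChainSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Keep archived WALs for 10 minutes before TimeToLiveLogCleaner
        // allows the LogsCleaner chore to delete them.
        conf.setLong("hbase.master.logcleaner.ttl", 600_000L);
        // The cleaner chains are plugin lists; each comma-separated class name
        // becomes one delegate, as in the Initialize cleaner=... lines above.
        conf.set("hbase.master.logcleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner");
        System.out.println(conf.get("hbase.master.logcleaner.plugins"));
      }
    }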
2024-11-16T20:33:46,583 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T20:33:46,584 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T20:33:46,584 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T20:33:46,588 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T20:33:46,589 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T20:33:46,592 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789226590,5,FailOnTimeoutGroup] 2024-11-16T20:33:46,594 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789226593,5,FailOnTimeoutGroup] 2024-11-16T20:33:46,595 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T20:33:46,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741831_1007 (size=1321) 2024-11-16T20:33:46,595 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T20:33:46,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741831_1007 (size=1321) 2024-11-16T20:33:46,596 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T20:33:46,597 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
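The HMaster line above spells out how to enable reopening of regions with very high store file reference counts: give hbase.regions.recovery.store.file.ref.count a value greater than 0. A minimal sketch; the 256 threshold and the class name are arbitrary and hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountRecoverySketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Any value > 0 turns the feature on; regions whose store files stay
        // pinned by more than this many readers become candidates for reopening.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
        System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
      }
    }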
2024-11-16T20:33:46,598 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T20:33:46,599 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc 2024-11-16T20:33:46,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741832_1008 (size=32) 2024-11-16T20:33:46,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741832_1008 (size=32) 2024-11-16T20:33:46,617 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:33:46,620 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T20:33:46,624 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction 
window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T20:33:46,624 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:33:46,625 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:33:46,626 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T20:33:46,629 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T20:33:46,629 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:33:46,630 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57397, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T20:33:46,630 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:33:46,631 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T20:33:46,634 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T20:33:46,634 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:33:46,635 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:33:46,636 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T20:33:46,638 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34669 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 40c018648b21,41609,1731789225506 2024-11-16T20:33:46,639 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T20:33:46,639 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:33:46,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:33:46,641 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T20:33:46,641 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34669 {}] master.ServerManager(517): Registering regionserver=40c018648b21,41609,1731789225506 2024-11-16T20:33:46,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740 2024-11-16T20:33:46,643 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740 2024-11-16T20:33:46,646 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T20:33:46,647 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T20:33:46,648 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
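The FlushLargeStoresPolicy fallback above is simple arithmetic: with no per-table override, the lower bound becomes the region's memstore flush size divided by the number of families, which is why master:store (flushSize=134217728, i.e. 128 MB, with 4 families) reported 32.0 M earlier and hbase:meta (also 4 families) reports 16.0 M here, implying a 64 MB flush size for meta. The override lives in the table descriptor under the key the log names; a sketch with a hypothetical table and class name.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        // Setting the bound explicitly in the table descriptor overrides the
        // getMemStoreFlushHeapSize / numFamilies fallback mentioned above.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_flush"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))   // 16 MB per family
            .build();
        System.out.println(
            td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
      }
    }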
2024-11-16T20:33:46,652 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T20:33:46,656 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:33:46,657 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=741106, jitterRate=-0.05763570964336395}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T20:33:46,658 DEBUG [RS:0;40c018648b21:41609 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc 2024-11-16T20:33:46,658 DEBUG [RS:0;40c018648b21:41609 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38761 2024-11-16T20:33:46,658 DEBUG [RS:0;40c018648b21:41609 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T20:33:46,661 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731789226617Initializing all the Stores at 1731789226620 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789226620Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789226620Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789226620Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789226620Cleaning up temporary data from old regions at 1731789226647 (+27 ms)Region opened successfully at 1731789226661 (+14 ms) 2024-11-16T20:33:46,662 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T20:33:46,662 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T20:33:46,662 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T20:33:46,662 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 
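The split-policy line above lets you back out the configured maximum region file size: the policy appears to compute desiredMaxFileSize = maxFileSize + (long)(maxFileSize * jitterRate), and a base of 786432 bytes reproduces both 805392 (jitterRate=0.0241...) printed earlier for master:store and 741106 (jitterRate=-0.0576...) printed here, so 786432 is presumably this test's hbase.hregion.max.filesize. A sketch of the arithmetic and the related keys; the 786432 base is inferred from the printed numbers, not read from the log, and the class name is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SplitPolicySketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.regionserver.region.split.policy",
            "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
        long base = 786_432L;                           // assumed test value of hbase.hregion.max.filesize
        conf.setLong("hbase.hregion.max.filesize", base);
        double jitterRate = -0.05763570964336395;       // value printed for hbase:meta above
        long desired = base + (long) (base * jitterRate);
        System.out.println(desired);                    // 741106, matching the log line
      }
    }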
2024-11-16T20:33:46,662 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T20:33:46,663 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T20:33:46,663 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731789226662Disabling compacts and flushes for region at 1731789226662Disabling writes for close at 1731789226662Writing region close event to WAL at 1731789226663 (+1 ms)Closed at 1731789226663 2024-11-16T20:33:46,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T20:33:46,665 DEBUG [RS:0;40c018648b21:41609 {}] zookeeper.ZKUtil(111): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/40c018648b21,41609,1731789225506 2024-11-16T20:33:46,666 WARN [RS:0;40c018648b21:41609 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T20:33:46,666 INFO [RS:0;40c018648b21:41609 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:33:46,666 DEBUG [RS:0;40c018648b21:41609 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506 2024-11-16T20:33:46,666 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:33:46,666 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T20:33:46,669 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [40c018648b21,41609,1731789225506] 2024-11-16T20:33:46,673 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T20:33:46,681 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T20:33:46,683 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T20:33:46,690 INFO [RS:0;40c018648b21:41609 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T20:33:46,704 INFO [RS:0;40c018648b21:41609 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T20:33:46,709 INFO [RS:0;40c018648b21:41609 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 
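The MemStoreFlusher line above reflects the usual heap fractions: the 836 M low-water mark is 95% of the 880 M global limit (880 * 0.95 = 836), and the 100/50 MB-per-second compaction throughput bounds are the PressureAwareCompactionThroughputController defaults. A sketch of the corresponding settings; the key names are the standard ones as far as I know (they do not appear verbatim in this log), and the class name is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerPressureSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of heap for all memstores, and the low-water mark as a
        // fraction of that limit (0.95 gives the 880 M / 836 M pair above).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Compaction throughput bounds in bytes/second (100 MB and 50 MB).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        System.out.println(conf.get("hbase.regionserver.global.memstore.size.lower.limit"));
      }
    }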
2024-11-16T20:33:46,709 INFO [RS:0;40c018648b21:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:33:46,710 INFO [RS:0;40c018648b21:41609 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T20:33:46,717 INFO [RS:0;40c018648b21:41609 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T20:33:46,718 INFO [RS:0;40c018648b21:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T20:33:46,718 DEBUG [RS:0;40c018648b21:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:33:46,718 DEBUG [RS:0;40c018648b21:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:33:46,719 DEBUG [RS:0;40c018648b21:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:33:46,719 DEBUG [RS:0;40c018648b21:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:33:46,719 DEBUG [RS:0;40c018648b21:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:33:46,719 DEBUG [RS:0;40c018648b21:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/40c018648b21:0, corePoolSize=2, maxPoolSize=2 2024-11-16T20:33:46,719 DEBUG [RS:0;40c018648b21:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:33:46,720 DEBUG [RS:0;40c018648b21:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:33:46,720 DEBUG [RS:0;40c018648b21:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:33:46,720 DEBUG [RS:0;40c018648b21:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:33:46,720 DEBUG [RS:0;40c018648b21:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:33:46,720 DEBUG [RS:0;40c018648b21:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:33:46,720 DEBUG [RS:0;40c018648b21:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/40c018648b21:0, corePoolSize=3, maxPoolSize=3 2024-11-16T20:33:46,720 DEBUG [RS:0;40c018648b21:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0, corePoolSize=3, maxPoolSize=3 2024-11-16T20:33:46,721 INFO [RS:0;40c018648b21:41609 {}] hbase.ChoreService(168): Chore ScheduledChore 
name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T20:33:46,721 INFO [RS:0;40c018648b21:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T20:33:46,721 INFO [RS:0;40c018648b21:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:33:46,721 INFO [RS:0;40c018648b21:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T20:33:46,722 INFO [RS:0;40c018648b21:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T20:33:46,722 INFO [RS:0;40c018648b21:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,41609,1731789225506-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T20:33:46,738 INFO [RS:0;40c018648b21:41609 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T20:33:46,739 INFO [RS:0;40c018648b21:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,41609,1731789225506-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:33:46,740 INFO [RS:0;40c018648b21:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:33:46,740 INFO [RS:0;40c018648b21:41609 {}] regionserver.Replication(171): 40c018648b21,41609,1731789225506 started 2024-11-16T20:33:46,759 INFO [RS:0;40c018648b21:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
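Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" line above is a ScheduledChore registered with the server's ChoreService. These are internal classes rather than public client API, but a minimal sketch shows the pattern, assuming hbase-common on the classpath; the chore name, the 1000 ms period (borrowed from the CompactionChecker entry), and the class names are hypothetical.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Chores stop running once their Stoppable reports stopped.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ScheduledChore heartbeat = new ScheduledChore("HeartbeatChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore fired");
          }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(heartbeat);   // runs chore() every 1000 ms
        Thread.sleep(3000);
        stopper.stop("done");
        service.shutdown();
      }
    }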
2024-11-16T20:33:46,760 INFO [RS:0;40c018648b21:41609 {}] regionserver.HRegionServer(1482): Serving as 40c018648b21,41609,1731789225506, RpcServer on 40c018648b21/172.17.0.2:41609, sessionid=0x101455a73c80001 2024-11-16T20:33:46,761 DEBUG [RS:0;40c018648b21:41609 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T20:33:46,761 DEBUG [RS:0;40c018648b21:41609 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 40c018648b21,41609,1731789225506 2024-11-16T20:33:46,761 DEBUG [RS:0;40c018648b21:41609 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '40c018648b21,41609,1731789225506' 2024-11-16T20:33:46,761 DEBUG [RS:0;40c018648b21:41609 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T20:33:46,763 DEBUG [RS:0;40c018648b21:41609 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T20:33:46,763 DEBUG [RS:0;40c018648b21:41609 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T20:33:46,763 DEBUG [RS:0;40c018648b21:41609 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T20:33:46,763 DEBUG [RS:0;40c018648b21:41609 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 40c018648b21,41609,1731789225506 2024-11-16T20:33:46,764 DEBUG [RS:0;40c018648b21:41609 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '40c018648b21,41609,1731789225506' 2024-11-16T20:33:46,764 DEBUG [RS:0;40c018648b21:41609 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T20:33:46,764 DEBUG [RS:0;40c018648b21:41609 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T20:33:46,765 DEBUG [RS:0;40c018648b21:41609 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T20:33:46,765 INFO [RS:0;40c018648b21:41609 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T20:33:46,765 INFO [RS:0;40c018648b21:41609 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T20:33:46,834 WARN [40c018648b21:34669 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-16T20:33:46,875 INFO [RS:0;40c018648b21:41609 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C41609%2C1731789225506, suffix=, logDir=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506, archiveDir=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/oldWALs, maxLogs=32 2024-11-16T20:33:46,878 INFO [RS:0;40c018648b21:41609 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C41609%2C1731789225506.1731789226878 2024-11-16T20:33:46,886 INFO [RS:0;40c018648b21:41609 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789226878 2024-11-16T20:33:46,890 DEBUG [RS:0;40c018648b21:41609 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34099:34099),(127.0.0.1/127.0.0.1:43385:43385)] 2024-11-16T20:33:47,089 DEBUG [40c018648b21:34669 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T20:33:47,103 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=40c018648b21,41609,1731789225506 2024-11-16T20:33:47,108 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 40c018648b21,41609,1731789225506, state=OPENING 2024-11-16T20:33:47,160 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T20:33:47,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:33:47,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:33:47,173 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:33:47,173 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:33:47,176 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T20:33:47,180 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=40c018648b21,41609,1731789225506}] 2024-11-16T20:33:47,359 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T20:33:47,363 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33999, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T20:33:47,374 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T20:33:47,375 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:33:47,378 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C41609%2C1731789225506.meta, suffix=.meta, logDir=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506, archiveDir=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/oldWALs, maxLogs=32 2024-11-16T20:33:47,380 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C41609%2C1731789225506.meta.1731789227380.meta 2024-11-16T20:33:47,388 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.meta.1731789227380.meta 2024-11-16T20:33:47,390 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34099:34099),(127.0.0.1/127.0.0.1:43385:43385)] 2024-11-16T20:33:47,392 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T20:33:47,394 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T20:33:47,396 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T20:33:47,400 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
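
The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" entries above are driven by a few hbase-site.xml properties. The sketch below is illustrative only, not taken from the test; the key names are the standard ones to the best of my knowledge.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfSketch {
      static Configuration walTuning() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB above
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = 50% of blocksize = 128 MB
        conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs=32 above
        return conf;
      }
    }
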
2024-11-16T20:33:47,404 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T20:33:47,404 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:33:47,405 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T20:33:47,405 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T20:33:47,408 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T20:33:47,409 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T20:33:47,409 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:33:47,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:33:47,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T20:33:47,412 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T20:33:47,412 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:33:47,413 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:33:47,413 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T20:33:47,414 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T20:33:47,414 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:33:47,415 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:33:47,415 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T20:33:47,417 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T20:33:47,417 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:33:47,417 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-16T20:33:47,418 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T20:33:47,419 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740 2024-11-16T20:33:47,422 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740 2024-11-16T20:33:47,424 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T20:33:47,424 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T20:33:47,425 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T20:33:47,428 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T20:33:47,430 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=759053, jitterRate=-0.03481544554233551}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T20:33:47,430 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T20:33:47,432 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731789227405Writing region info on filesystem at 1731789227406 (+1 ms)Initializing all the Stores at 1731789227407 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789227407Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789227407Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789227408 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789227408Cleaning up temporary data from old regions at 1731789227424 (+16 ms)Running coprocessor post-open hooks at 1731789227430 (+6 ms)Region opened successfully at 1731789227432 (+2 ms) 2024-11-16T20:33:47,438 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731789227351 2024-11-16T20:33:47,448 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T20:33:47,448 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T20:33:47,450 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=40c018648b21,41609,1731789225506 2024-11-16T20:33:47,452 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 40c018648b21,41609,1731789225506, state=OPEN 2024-11-16T20:33:47,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T20:33:47,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T20:33:47,527 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:33:47,527 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:33:47,528 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=40c018648b21,41609,1731789225506 2024-11-16T20:33:47,537 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T20:33:47,537 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=40c018648b21,41609,1731789225506 in 349 msec 2024-11-16T20:33:47,544 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T20:33:47,544 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 866 msec 2024-11-16T20:33:47,546 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:33:47,546 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T20:33:47,562 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T20:33:47,563 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=40c018648b21,41609,1731789225506, seqNum=-1] 2024-11-16T20:33:47,580 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T20:33:47,582 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47589, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T20:33:47,600 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1020 sec 2024-11-16T20:33:47,601 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731789227601, completionTime=-1 2024-11-16T20:33:47,603 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T20:33:47,603 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T20:33:47,628 INFO [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T20:33:47,628 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731789287628 2024-11-16T20:33:47,628 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731789347628 2024-11-16T20:33:47,628 INFO [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 24 msec 2024-11-16T20:33:47,630 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,34669,1731789224801-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:33:47,631 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,34669,1731789224801-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:33:47,631 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,34669,1731789224801-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:33:47,632 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-40c018648b21:34669, period=300000, unit=MILLISECONDS is enabled. 
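
The "Start fetching meta region location from registry" / "The fetched meta region location is [...]" lines above have a straightforward client-side equivalent in the public API. The sketch below is illustrative and not code from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println(loc); // e.g. region=hbase:meta,,1.1588230740, hostname=..., seqNum=-1
        }
      }
    }
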
2024-11-16T20:33:47,632 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T20:33:47,633 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T20:33:47,639 DEBUG [master/40c018648b21:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T20:33:47,658 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.992sec 2024-11-16T20:33:47,659 INFO [master/40c018648b21:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T20:33:47,660 INFO [master/40c018648b21:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T20:33:47,661 INFO [master/40c018648b21:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T20:33:47,662 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T20:33:47,662 INFO [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T20:33:47,663 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,34669,1731789224801-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T20:33:47,663 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,34669,1731789224801-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T20:33:47,671 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T20:33:47,671 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T20:33:47,672 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,34669,1731789224801-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T20:33:47,710 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c233f9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:33:47,712 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-16T20:33:47,713 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-16T20:33:47,716 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 40c018648b21,34669,-1 for getting cluster id 2024-11-16T20:33:47,719 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T20:33:47,726 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f1d5dc12-4a5c-46aa-8a74-967dcbd7f113' 2024-11-16T20:33:47,729 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T20:33:47,729 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f1d5dc12-4a5c-46aa-8a74-967dcbd7f113" 2024-11-16T20:33:47,731 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33ae194b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:33:47,731 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [40c018648b21,34669,-1] 2024-11-16T20:33:47,733 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T20:33:47,735 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:33:47,737 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57248, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T20:33:47,739 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@783fe34b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:33:47,740 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T20:33:47,747 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=40c018648b21,41609,1731789225506, seqNum=-1] 2024-11-16T20:33:47,747 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T20:33:47,749 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42240, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T20:33:47,767 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=40c018648b21,34669,1731789224801 2024-11-16T20:33:47,768 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:33:47,776 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T20:33:47,781 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T20:33:47,787 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 40c018648b21,34669,1731789224801 2024-11-16T20:33:47,790 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5ae36ead 2024-11-16T20:33:47,792 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T20:33:47,796 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57262, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T20:33:47,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34669 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T20:33:47,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34669 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
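
The two WARN lines above show that this test run deliberately shrinks "hbase.hregion.max.filesize" (786432) and "hbase.hregion.memstore.flush.size" (8192) so that rolls and flushes happen quickly. A minimal sketch of setting those values before starting the mini-cluster follows; whether the test configures them exactly this way is an assumption, and only the two property names and values come from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class TinyRegionConfSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.max.filesize", 786432);      // value from the first WARN above
        conf.setLong("hbase.hregion.memstore.flush.size", 8192); // value from the second WARN above
        // Assumed usage: HBaseTestingUtil appears in the log, but the exact
        // constructor/start calls the test uses are not shown there.
        HBaseTestingUtil util = new HBaseTestingUtil(conf);
        util.startMiniCluster();
        // ... run the scenario ...
        util.shutdownMiniCluster();
      }
    }
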
2024-11-16T20:33:47,803 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34669 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T20:33:47,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34669 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-16T20:33:47,833 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T20:33:47,836 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34669 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-16T20:33:47,836 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:33:47,839 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T20:33:47,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34669 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T20:33:47,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741835_1011 (size=389) 2024-11-16T20:33:47,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741835_1011 (size=389) 2024-11-16T20:33:47,887 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ae4cd1d3889650ff27c29755dacfee40, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc 2024-11-16T20:33:47,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741836_1012 (size=72) 2024-11-16T20:33:47,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741836_1012 (size=72) 2024-11-16T20:33:47,899 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:33:47,900 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing ae4cd1d3889650ff27c29755dacfee40, disabling compactions & flushes 2024-11-16T20:33:47,900 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40. 2024-11-16T20:33:47,900 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40. 2024-11-16T20:33:47,900 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40. after waiting 0 ms 2024-11-16T20:33:47,900 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40. 2024-11-16T20:33:47,900 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40. 2024-11-16T20:33:47,900 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for ae4cd1d3889650ff27c29755dacfee40: Waiting for close lock at 1731789227900Disabling compacts and flushes for region at 1731789227900Disabling writes for close at 1731789227900Writing region close event to WAL at 1731789227900Closed at 1731789227900 2024-11-16T20:33:47,902 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T20:33:47,906 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731789227902"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731789227902"}]},"ts":"1731789227902"} 2024-11-16T20:33:47,911 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-16T20:33:47,913 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T20:33:47,916 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731789227913"}]},"ts":"1731789227913"} 2024-11-16T20:33:47,921 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-16T20:33:47,923 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ae4cd1d3889650ff27c29755dacfee40, ASSIGN}] 2024-11-16T20:33:47,925 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ae4cd1d3889650ff27c29755dacfee40, ASSIGN 2024-11-16T20:33:47,927 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ae4cd1d3889650ff27c29755dacfee40, ASSIGN; state=OFFLINE, location=40c018648b21,41609,1731789225506; forceNewPlan=false, retain=false 2024-11-16T20:33:48,079 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ae4cd1d3889650ff27c29755dacfee40, regionState=OPENING, regionLocation=40c018648b21,41609,1731789225506 2024-11-16T20:33:48,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ae4cd1d3889650ff27c29755dacfee40, ASSIGN because future has completed 2024-11-16T20:33:48,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ae4cd1d3889650ff27c29755dacfee40, server=40c018648b21,41609,1731789225506}] 2024-11-16T20:33:48,252 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40. 
2024-11-16T20:33:48,252 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ae4cd1d3889650ff27c29755dacfee40, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40.', STARTKEY => '', ENDKEY => ''} 2024-11-16T20:33:48,253 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling ae4cd1d3889650ff27c29755dacfee40 2024-11-16T20:33:48,253 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:33:48,253 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ae4cd1d3889650ff27c29755dacfee40 2024-11-16T20:33:48,253 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ae4cd1d3889650ff27c29755dacfee40 2024-11-16T20:33:48,256 INFO [StoreOpener-ae4cd1d3889650ff27c29755dacfee40-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ae4cd1d3889650ff27c29755dacfee40 2024-11-16T20:33:48,258 INFO [StoreOpener-ae4cd1d3889650ff27c29755dacfee40-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ae4cd1d3889650ff27c29755dacfee40 columnFamilyName info 2024-11-16T20:33:48,259 DEBUG [StoreOpener-ae4cd1d3889650ff27c29755dacfee40-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:33:48,260 INFO [StoreOpener-ae4cd1d3889650ff27c29755dacfee40-1 {}] regionserver.HStore(327): Store=ae4cd1d3889650ff27c29755dacfee40/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:33:48,260 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ae4cd1d3889650ff27c29755dacfee40 2024-11-16T20:33:48,262 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40 2024-11-16T20:33:48,263 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40 2024-11-16T20:33:48,264 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ae4cd1d3889650ff27c29755dacfee40 2024-11-16T20:33:48,264 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ae4cd1d3889650ff27c29755dacfee40 2024-11-16T20:33:48,267 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ae4cd1d3889650ff27c29755dacfee40 2024-11-16T20:33:48,270 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:33:48,271 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ae4cd1d3889650ff27c29755dacfee40; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=814181, jitterRate=0.03528568148612976}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T20:33:48,271 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ae4cd1d3889650ff27c29755dacfee40 2024-11-16T20:33:48,272 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ae4cd1d3889650ff27c29755dacfee40: Running coprocessor pre-open hook at 1731789228253Writing region info on filesystem at 1731789228253Initializing all the Stores at 1731789228255 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789228255Cleaning up temporary data from old regions at 1731789228264 (+9 ms)Running coprocessor post-open hooks at 1731789228271 (+7 ms)Region opened successfully at 1731789228272 (+1 ms) 2024-11-16T20:33:48,274 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40., pid=6, masterSystemTime=1731789228240 2024-11-16T20:33:48,279 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40. 2024-11-16T20:33:48,279 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40. 2024-11-16T20:33:48,280 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ae4cd1d3889650ff27c29755dacfee40, regionState=OPEN, openSeqNum=2, regionLocation=40c018648b21,41609,1731789225506 2024-11-16T20:33:48,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ae4cd1d3889650ff27c29755dacfee40, server=40c018648b21,41609,1731789225506 because future has completed 2024-11-16T20:33:48,290 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T20:33:48,291 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ae4cd1d3889650ff27c29755dacfee40, server=40c018648b21,41609,1731789225506 in 201 msec 2024-11-16T20:33:48,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T20:33:48,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ae4cd1d3889650ff27c29755dacfee40, ASSIGN in 367 msec 2024-11-16T20:33:48,295 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T20:33:48,296 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731789228296"}]},"ts":"1731789228296"} 2024-11-16T20:33:48,299 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-16T20:33:48,301 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T20:33:48,304 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 495 msec 2024-11-16T20:33:52,920 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-16T20:33:52,969 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T20:33:52,970 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-16T20:33:55,145 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T20:33:55,146 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T20:33:55,152 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-16T20:33:55,152 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-16T20:33:55,154 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T20:33:55,154 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T20:33:55,154 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T20:33:55,154 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-16T20:33:57,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34669 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T20:33:57,927 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-16T20:33:57,931 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-16T20:33:57,939 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-16T20:33:57,939 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40. 
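
A hypothetical client-side view of the CREATE that the master logs at 20:33:47,803 and reports complete at 20:33:57,927 above: the table name, 'info' family, VERSIONS => '1' and BLOOMFILTER => 'ROW' come from the logged descriptor, while the builder calls below are the standard HBase Admin API rather than the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)                 // VERSIONS => '1' in the descriptor above
                  .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                  .build())
              .build();
          admin.createTable(td); // drives the CreateTableProcedure (pid=4) seen above
        }
      }
    }
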
2024-11-16T20:33:57,940 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C41609%2C1731789225506.1731789237940 2024-11-16T20:33:57,952 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:33:57,953 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:33:57,953 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:33:57,953 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:33:57,953 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:33:57,954 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789226878 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789237940 2024-11-16T20:33:57,957 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43385:43385),(127.0.0.1/127.0.0.1:34099:34099)] 2024-11-16T20:33:57,957 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789226878 is not closed yet, will try archiving it next time 2024-11-16T20:33:57,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741833_1009 (size=451) 2024-11-16T20:33:57,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741833_1009 (size=451) 2024-11-16T20:33:57,960 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789226878 to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/oldWALs/40c018648b21%2C41609%2C1731789225506.1731789226878 2024-11-16T20:33:57,967 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40., hostname=40c018648b21,41609,1731789225506, seqNum=2] 2024-11-16T20:34:10,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on ae4cd1d3889650ff27c29755dacfee40 2024-11-16T20:34:10,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae4cd1d3889650ff27c29755dacfee40 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T20:34:10,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp/info/ed13fa78cfd040a3ab6bbb77231237c9 is 1080, key is row0001/info:/1731789237969/Put/seqid=0 2024-11-16T20:34:10,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741838_1014 (size=12509) 2024-11-16T20:34:10,095 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741838_1014 (size=12509) 2024-11-16T20:34:10,096 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp/info/ed13fa78cfd040a3ab6bbb77231237c9 2024-11-16T20:34:10,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp/info/ed13fa78cfd040a3ab6bbb77231237c9 as hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/ed13fa78cfd040a3ab6bbb77231237c9 2024-11-16T20:34:10,171 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/ed13fa78cfd040a3ab6bbb77231237c9, entries=7, sequenceid=11, filesize=12.2 K 2024-11-16T20:34:10,179 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ae4cd1d3889650ff27c29755dacfee40 in 173ms, sequenceid=11, compaction requested=false 2024-11-16T20:34:10,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae4cd1d3889650ff27c29755dacfee40: 2024-11-16T20:34:13,910 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
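The flush above ran because writes filled the memstore for ae4cd1d3889650ff27c29755dacfee40, and the WAL roll just before it was driven by the test itself. For orientation, a hedged sketch of forcing the same flush and WAL roll through the public Admin API follows; it is illustrative and not code from AbstractTestLogRolling.

```java
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

// Hypothetical helper: force a memstore flush and a WAL roll during a test.
final class FlushAndRollHelper {
  static void flushAndRoll(Connection conn, TableName table, ServerName server) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      // Equivalent to the "Flush requested" / "Finished flush" sequence in the log.
      admin.flush(table);
      // Asks the region server to roll its WAL, as the logRoller thread does here.
      admin.rollWALWriter(server);
    }
  }
}
```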
2024-11-16T20:34:18,013 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C41609%2C1731789225506.1731789258013 2024-11-16T20:34:18,222 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK], DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK]] 2024-11-16T20:34:18,222 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:18,222 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:18,222 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:18,222 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:18,222 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:18,223 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789237940 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789258013 2024-11-16T20:34:18,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741837_1013 (size=12399) 2024-11-16T20:34:18,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741837_1013 (size=12399) 2024-11-16T20:34:18,228 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34099:34099),(127.0.0.1/127.0.0.1:43385:43385)] 2024-11-16T20:34:18,431 INFO [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK], DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK]] 2024-11-16T20:34:20,635 INFO [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK], DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK]] 2024-11-16T20:34:22,839 INFO [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK], DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK]] 2024-11-16T20:34:25,043 INFO [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK], DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK]] 2024-11-16T20:34:25,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] 
regionserver.HRegion(8855): Flush requested on ae4cd1d3889650ff27c29755dacfee40 2024-11-16T20:34:25,043 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae4cd1d3889650ff27c29755dacfee40 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T20:34:25,245 INFO [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK], DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK]] 2024-11-16T20:34:25,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp/info/8862305dc79a44d5a7dbbaab22550c57 is 1080, key is row0008/info:/1731789252003/Put/seqid=0 2024-11-16T20:34:25,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741840_1016 (size=12509) 2024-11-16T20:34:25,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741840_1016 (size=12509) 2024-11-16T20:34:25,266 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp/info/8862305dc79a44d5a7dbbaab22550c57 2024-11-16T20:34:25,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp/info/8862305dc79a44d5a7dbbaab22550c57 as hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/8862305dc79a44d5a7dbbaab22550c57 2024-11-16T20:34:25,296 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/8862305dc79a44d5a7dbbaab22550c57, entries=7, sequenceid=21, filesize=12.2 K 2024-11-16T20:34:25,498 INFO [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK], DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK]] 2024-11-16T20:34:25,498 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ae4cd1d3889650ff27c29755dacfee40 in 455ms, sequenceid=21, compaction requested=false 2024-11-16T20:34:25,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae4cd1d3889650ff27c29755dacfee40: 2024-11-16T20:34:25,498 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-16T20:34:25,498 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:34:25,499 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/ed13fa78cfd040a3ab6bbb77231237c9 because midkey is the same as first or last row 2024-11-16T20:34:27,247 INFO [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK], DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK]] 2024-11-16T20:34:27,673 INFO [master/40c018648b21:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T20:34:27,673 INFO [master/40c018648b21:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-16T20:34:29,455 INFO [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK], DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK]] 2024-11-16T20:34:29,459 WARN [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK], DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK]] 2024-11-16T20:34:29,460 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 40c018648b21%2C41609%2C1731789225506:(num 1731789258013) roll requested 2024-11-16T20:34:29,461 INFO [regionserver/40c018648b21:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C41609%2C1731789225506.1731789269461 2024-11-16T20:34:29,674 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 210 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK], DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK]] 2024-11-16T20:34:29,674 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:29,675 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:29,675 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:29,675 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:29,675 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:29,676 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789258013 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789269461 2024-11-16T20:34:29,677 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43385:43385),(127.0.0.1/127.0.0.1:34099:34099)] 2024-11-16T20:34:29,677 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789258013 is not closed yet, will try archiving it next time 2024-11-16T20:34:29,678 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789237940 to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/oldWALs/40c018648b21%2C41609%2C1731789225506.1731789237940 2024-11-16T20:34:29,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741839_1015 (size=7739) 2024-11-16T20:34:29,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741839_1015 (size=7739) 2024-11-16T20:34:31,660 INFO [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK], DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK]] 2024-11-16T20:34:33,253 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ae4cd1d3889650ff27c29755dacfee40, had cached 0 bytes from a total of 25018 2024-11-16T20:34:33,867 INFO [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK], DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK]] 2024-11-16T20:34:36,073 INFO [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK], DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK]] 2024-11-16T20:34:38,278 INFO [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK], DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK]] 2024-11-16T20:34:40,283 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T20:34:40,284 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C41609%2C1731789225506.1731789280283 
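The roll requested at 20:34:29 above fired because the number of consecutive slow syncs crossed a count threshold (count=8, threshold=5); the rolls that follow fire because a single sync exceeds a 5000 ms time threshold (for example time=5005 ms). The sketch below is a simplified, hypothetical rendering of that decision using the threshold values printed in the log; class, field, and method names are made up for illustration and this is not the actual FSHLog/AbstractFSWAL code.

```java
// Simplified illustration of the two slow-sync roll triggers visible in this log.
// All names are hypothetical; the thresholds mirror the values printed above.
final class SlowSyncRollPolicy {
  private static final long SLOW_SYNC_MS = 100;       // a sync slower than this counts as "slow"
  private static final long ROLL_ON_SYNC_MS = 5000;   // one sync this slow forces a roll
  private static final int SLOW_SYNC_ROLL_COUNT = 5;  // this many slow syncs force a roll

  private int slowSyncCount;

  /** Returns true when a log roll should be requested for the observed sync time. */
  boolean onSyncCompleted(long syncCostMs) {
    if (syncCostMs >= ROLL_ON_SYNC_MS) {
      return true;                       // matches "time=5005 ms, threshold=5000 ms"
    }
    if (syncCostMs > SLOW_SYNC_MS) {
      slowSyncCount++;
      return slowSyncCount > SLOW_SYNC_ROLL_COUNT;  // matches "count=8, threshold=5"
    }
    slowSyncCount = 0;                   // a fast sync resets the streak
    return false;
  }

  /** Called after a roll completes, as the logRoller thread does. */
  void onLogRolled() {
    slowSyncCount = 0;
  }
}
```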
2024-11-16T20:34:43,910 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T20:34:45,297 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK], DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK]] 2024-11-16T20:34:45,300 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK], DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK]] 2024-11-16T20:34:45,300 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 40c018648b21%2C41609%2C1731789225506:(num 1731789280283) roll requested 2024-11-16T20:34:45,300 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:45,300 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:45,301 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:45,301 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:45,301 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:45,301 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789269461 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789280283 2024-11-16T20:34:45,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741841_1017 (size=4753) 2024-11-16T20:34:45,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741841_1017 (size=4753) 2024-11-16T20:34:45,308 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34099:34099),(127.0.0.1/127.0.0.1:43385:43385)] 2024-11-16T20:34:45,308 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789269461 is not closed yet, will try archiving it next time 2024-11-16T20:34:45,309 INFO [regionserver/40c018648b21:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C41609%2C1731789225506.1731789285309 2024-11-16T20:34:50,312 INFO [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK], DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK]] 2024-11-16T20:34:50,312 WARN [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1374): Requesting 
log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK], DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK]] 2024-11-16T20:34:50,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on ae4cd1d3889650ff27c29755dacfee40 2024-11-16T20:34:50,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae4cd1d3889650ff27c29755dacfee40 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T20:34:50,325 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK], DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK]] 2024-11-16T20:34:50,325 WARN [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK], DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK]] 2024-11-16T20:34:52,313 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T20:34:55,316 INFO [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK], DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK]] 2024-11-16T20:34:55,317 WARN [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK], DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK]] 2024-11-16T20:34:55,317 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:55,318 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:55,318 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:55,318 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:55,319 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:34:55,319 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789280283 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789285309 2024-11-16T20:34:55,321 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43385:43385),(127.0.0.1/127.0.0.1:34099:34099)] 2024-11-16T20:34:55,321 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789280283 is not closed yet, will try archiving it next time 2024-11-16T20:34:55,321 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 40c018648b21%2C41609%2C1731789225506:(num 1731789285309) roll requested 2024-11-16T20:34:55,321 INFO [regionserver/40c018648b21:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C41609%2C1731789225506.1731789295321 2024-11-16T20:34:55,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741842_1018 (size=1569) 2024-11-16T20:34:55,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741842_1018 (size=1569) 2024-11-16T20:34:55,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp/info/87e7c0b2c59447109bd9071e34e4ecce is 1080, key is row0015/info:/1731789267045/Put/seqid=0 2024-11-16T20:34:55,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741844_1020 (size=12509) 2024-11-16T20:34:55,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741844_1020 (size=12509) 2024-11-16T20:34:55,335 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp/info/87e7c0b2c59447109bd9071e34e4ecce 2024-11-16T20:34:55,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp/info/87e7c0b2c59447109bd9071e34e4ecce as hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/87e7c0b2c59447109bd9071e34e4ecce 2024-11-16T20:34:55,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/87e7c0b2c59447109bd9071e34e4ecce, entries=7, sequenceid=31, filesize=12.2 K 2024-11-16T20:35:00,333 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK], DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK]] 2024-11-16T20:35:00,333 WARN [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK], 
DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK]] 2024-11-16T20:35:00,360 INFO [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK], DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK]] 2024-11-16T20:35:00,360 WARN [FSHLog-0-hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc-prefix:40c018648b21,41609,1731789225506 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41759,DS-8b4ce1c8-2add-4a01-ba07-62af67e7d0be,DISK], DatanodeInfoWithStorage[127.0.0.1:34255,DS-051aaca9-c637-4f47-8502-a207905dede7,DISK]] 2024-11-16T20:35:00,360 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ae4cd1d3889650ff27c29755dacfee40 in 10047ms, sequenceid=31, compaction requested=true 2024-11-16T20:35:00,360 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:00,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae4cd1d3889650ff27c29755dacfee40: 2024-11-16T20:35:00,360 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:00,360 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:00,360 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-16T20:35:00,360 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:00,360 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:35:00,361 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:00,361 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/ed13fa78cfd040a3ab6bbb77231237c9 because midkey is the same as first or last row 2024-11-16T20:35:00,361 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789285309 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789295321 2024-11-16T20:35:00,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ae4cd1d3889650ff27c29755dacfee40:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T20:35:00,362 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43385:43385),(127.0.0.1/127.0.0.1:34099:34099)] 2024-11-16T20:35:00,363 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789285309 is not closed yet, will try archiving it next time 2024-11-16T20:35:00,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741843_1019 (size=438) 2024-11-16T20:35:00,363 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 40c018648b21%2C41609%2C1731789225506:(num 1731789295321) roll requested 2024-11-16T20:35:00,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741843_1019 (size=438) 2024-11-16T20:35:00,364 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789258013 to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/oldWALs/40c018648b21%2C41609%2C1731789225506.1731789258013 2024-11-16T20:35:00,364 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C41609%2C1731789225506.1731789300364 2024-11-16T20:35:00,365 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T20:35:00,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:35:00,367 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789269461 to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/oldWALs/40c018648b21%2C41609%2C1731789225506.1731789269461 2024-11-16T20:35:00,368 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T20:35:00,368 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789280283 to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/oldWALs/40c018648b21%2C41609%2C1731789225506.1731789280283 2024-11-16T20:35:00,370 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789285309 to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/oldWALs/40c018648b21%2C41609%2C1731789225506.1731789285309 2024-11-16T20:35:00,370 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.HStore(1541): ae4cd1d3889650ff27c29755dacfee40/info is initiating minor compaction (all files) 2024-11-16T20:35:00,371 INFO [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ae4cd1d3889650ff27c29755dacfee40/info in 
TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40. 2024-11-16T20:35:00,371 INFO [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/ed13fa78cfd040a3ab6bbb77231237c9, hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/8862305dc79a44d5a7dbbaab22550c57, hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/87e7c0b2c59447109bd9071e34e4ecce] into tmpdir=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp, totalSize=36.6 K 2024-11-16T20:35:00,373 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting ed13fa78cfd040a3ab6bbb77231237c9, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731789237969 2024-11-16T20:35:00,374 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8862305dc79a44d5a7dbbaab22550c57, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731789252003 2024-11-16T20:35:00,375 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 87e7c0b2c59447109bd9071e34e4ecce, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731789267045 2024-11-16T20:35:00,376 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:00,376 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:00,376 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:00,376 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:00,376 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:00,376 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789295321 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789300364 2024-11-16T20:35:00,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741845_1021 (size=93) 2024-11-16T20:35:00,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741845_1021 (size=93) 2024-11-16T20:35:00,380 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34099:34099),(127.0.0.1/127.0.0.1:43385:43385)] 2024-11-16T20:35:00,381 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789295321 is not closed yet, will try archiving it next time 2024-11-16T20:35:00,381 INFO 
[regionserver/40c018648b21:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C41609%2C1731789225506.1731789300381 2024-11-16T20:35:00,388 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:00,389 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:00,389 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:00,389 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:00,389 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:00,389 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789300364 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789300381 2024-11-16T20:35:00,390 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43385:43385),(127.0.0.1/127.0.0.1:34099:34099)] 2024-11-16T20:35:00,390 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789295321 is not closed yet, will try archiving it next time 2024-11-16T20:35:00,390 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789300364 is not closed yet, will try archiving it next time 2024-11-16T20:35:00,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741846_1022 (size=1258) 2024-11-16T20:35:00,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741846_1022 (size=1258) 2024-11-16T20:35:00,410 INFO [RS:0;40c018648b21:41609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ae4cd1d3889650ff27c29755dacfee40#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T20:35:00,411 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp/info/dd2415afa7e34690a46e870820b4ecd2 is 1080, key is row0001/info:/1731789237969/Put/seqid=0 2024-11-16T20:35:00,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741848_1024 (size=27710) 2024-11-16T20:35:00,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741848_1024 (size=27710) 2024-11-16T20:35:00,427 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp/info/dd2415afa7e34690a46e870820b4ecd2 as hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/dd2415afa7e34690a46e870820b4ecd2 2024-11-16T20:35:00,442 INFO [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ae4cd1d3889650ff27c29755dacfee40/info of ae4cd1d3889650ff27c29755dacfee40 into dd2415afa7e34690a46e870820b4ecd2(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T20:35:00,442 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ae4cd1d3889650ff27c29755dacfee40: 2024-11-16T20:35:00,443 INFO [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40., storeName=ae4cd1d3889650ff27c29755dacfee40/info, priority=13, startTime=1731789300362; duration=0sec 2024-11-16T20:35:00,444 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T20:35:00,444 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:35:00,444 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/dd2415afa7e34690a46e870820b4ecd2 because midkey is the same as first or last row 2024-11-16T20:35:00,444 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T20:35:00,444 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:35:00,444 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/dd2415afa7e34690a46e870820b4ecd2 because midkey is the same as first or last row 2024-11-16T20:35:00,444 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T20:35:00,444 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:35:00,444 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/dd2415afa7e34690a46e870820b4ecd2 because midkey is the same as first or last row 2024-11-16T20:35:00,444 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:35:00,445 DEBUG [RS:0;40c018648b21:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ae4cd1d3889650ff27c29755dacfee40:info 2024-11-16T20:35:00,781 DEBUG [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(879): hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789300364 is not closed yet, will try archiving it next time 2024-11-16T20:35:00,781 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/WALs/40c018648b21,41609,1731789225506/40c018648b21%2C41609%2C1731789225506.1731789295321 to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/oldWALs/40c018648b21%2C41609%2C1731789225506.1731789295321 2024-11-16T20:35:12,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on ae4cd1d3889650ff27c29755dacfee40 2024-11-16T20:35:12,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae4cd1d3889650ff27c29755dacfee40 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T20:35:12,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp/info/b2c24d1bf531477cbcc3c2a398b3bdf8 is 1080, key is row0022/info:/1731789300382/Put/seqid=0 2024-11-16T20:35:12,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741849_1025 (size=12509) 2024-11-16T20:35:12,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741849_1025 (size=12509) 2024-11-16T20:35:12,427 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), 
to=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp/info/b2c24d1bf531477cbcc3c2a398b3bdf8 2024-11-16T20:35:12,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp/info/b2c24d1bf531477cbcc3c2a398b3bdf8 as hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/b2c24d1bf531477cbcc3c2a398b3bdf8 2024-11-16T20:35:12,446 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/b2c24d1bf531477cbcc3c2a398b3bdf8, entries=7, sequenceid=42, filesize=12.2 K 2024-11-16T20:35:12,447 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ae4cd1d3889650ff27c29755dacfee40 in 37ms, sequenceid=42, compaction requested=false 2024-11-16T20:35:12,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae4cd1d3889650ff27c29755dacfee40: 2024-11-16T20:35:12,448 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-16T20:35:12,448 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:35:12,448 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/dd2415afa7e34690a46e870820b4ecd2 because midkey is the same as first or last row 2024-11-16T20:35:13,911 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T20:35:18,254 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ae4cd1d3889650ff27c29755dacfee40, had cached 0 bytes from a total of 40219 2024-11-16T20:35:20,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T20:35:20,457 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
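Throughout this run the split policy reports that the region is big enough to split (sumSize above sizeToCheck) yet declines because the chosen store file's midkey equals its first or last row, which is expected when each flush covers only a handful of rows. A simplified, hypothetical rendering of that two-part check (not the actual ConstantSizeRegionSplitPolicy/StoreUtils code):

```java
import java.util.Arrays;
import java.util.Optional;

// Hypothetical, simplified version of the split decision logged above.
final class SplitCheckSketch {
  /** Size-based part: split once the summed store size exceeds the configured check size. */
  static boolean shouldSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
    return sumStoreSizeBytes > sizeToCheckBytes;   // e.g. 39.3 K > 16.0 K in the log
  }

  /** Midkey guard: a file whose midkey equals its first or last key yields no split point. */
  static Optional<byte[]> splitPoint(byte[] firstKey, byte[] lastKey, byte[] midKey) {
    if (Arrays.equals(midKey, firstKey) || Arrays.equals(midKey, lastKey)) {
      return Optional.empty();  // "cannot split ... because midkey is the same as first or last row"
    }
    return Optional.of(midKey);
  }
}
```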
2024-11-16T20:35:20,458 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:35:20,463 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:35:20,464 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:35:20,464 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
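The call stack above is printed deliberately by AsyncConnectionImpl.close() to identify its caller; here it is the test's @After method shutting down the mini cluster via HBaseTestingUtil. A sketch of that teardown pattern is shown below; the class and field names are assumptions, and only shutdownMiniCluster() is confirmed by the stack trace.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

// Hypothetical test skeleton; AbstractTestLogRolling.tearDown() calls
// shutdownMiniCluster(), which produces the connection-close entries above.
public class LogRollingTestSkeleton {
  private final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    TEST_UTIL.startMiniCluster();       // one master and one region server, as in this run
  }

  @After
  public void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();    // emits "Shutting down minicluster" and closes the connection
  }
}
```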
2024-11-16T20:35:20,464 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T20:35:20,464 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2125010042, stopped=false 2024-11-16T20:35:20,465 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=40c018648b21,34669,1731789224801 2024-11-16T20:35:20,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T20:35:20,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T20:35:20,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:20,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:20,513 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T20:35:20,513 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T20:35:20,514 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:35:20,514 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:35:20,514 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:35:20,514 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:35:20,515 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '40c018648b21,41609,1731789225506' ***** 2024-11-16T20:35:20,515 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T20:35:20,515 INFO [RS:0;40c018648b21:41609 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T20:35:20,516 INFO [RS:0;40c018648b21:41609 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T20:35:20,516 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T20:35:20,516 INFO [RS:0;40c018648b21:41609 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T20:35:20,517 INFO [RS:0;40c018648b21:41609 {}] regionserver.HRegionServer(3091): Received CLOSE for ae4cd1d3889650ff27c29755dacfee40 2024-11-16T20:35:20,517 INFO [RS:0;40c018648b21:41609 {}] regionserver.HRegionServer(959): stopping server 40c018648b21,41609,1731789225506 2024-11-16T20:35:20,518 INFO [RS:0;40c018648b21:41609 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T20:35:20,518 INFO [RS:0;40c018648b21:41609 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;40c018648b21:41609. 
2024-11-16T20:35:20,518 DEBUG [RS:0;40c018648b21:41609 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:35:20,518 DEBUG [RS:0;40c018648b21:41609 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:35:20,518 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ae4cd1d3889650ff27c29755dacfee40, disabling compactions & flushes 2024-11-16T20:35:20,519 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40. 2024-11-16T20:35:20,519 INFO [RS:0;40c018648b21:41609 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T20:35:20,519 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40. 2024-11-16T20:35:20,519 INFO [RS:0;40c018648b21:41609 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T20:35:20,519 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40. after waiting 0 ms 2024-11-16T20:35:20,519 INFO [RS:0;40c018648b21:41609 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T20:35:20,519 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40. 
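The "Time limited wait for close lock", "Acquired close lock ... after waiting 0 ms" and "Updates disabled for region" lines describe a quiesce step: in-flight writes are drained, new updates are rejected, and only then is the memstore flushed. A rough sketch of that handshake with a plain ReentrantReadWriteLock follows; it is a pattern illustration only, not HRegion's actual locking code.

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class RegionCloseLockSketch {
      private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
      private volatile boolean writesDisabled = false;

      void put(Runnable write) {
        closeLock.readLock().lock(); // writers hold the read side of the close lock
        try {
          if (writesDisabled) {
            throw new IllegalStateException("region is closing");
          }
          write.run();
        } finally {
          closeLock.readLock().unlock();
        }
      }

      boolean close(long waitMillis) throws InterruptedException {
        // "Time limited wait for close lock" / "Acquired close lock after waiting N ms"
        if (!closeLock.writeLock().tryLock(waitMillis, TimeUnit.MILLISECONDS)) {
          return false; // could not quiesce writers within the time limit
        }
        try {
          writesDisabled = true; // "Updates disabled for region ..."
          // flush the memstore and close store files here
          return true;
        } finally {
          closeLock.writeLock().unlock();
        }
      }
    }
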
2024-11-16T20:35:20,519 INFO [RS:0;40c018648b21:41609 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T20:35:20,520 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing ae4cd1d3889650ff27c29755dacfee40 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-16T20:35:20,520 INFO [RS:0;40c018648b21:41609 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T20:35:20,520 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T20:35:20,520 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T20:35:20,520 DEBUG [RS:0;40c018648b21:41609 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, ae4cd1d3889650ff27c29755dacfee40=TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40.} 2024-11-16T20:35:20,520 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T20:35:20,520 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T20:35:20,520 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T20:35:20,521 DEBUG [RS:0;40c018648b21:41609 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ae4cd1d3889650ff27c29755dacfee40 2024-11-16T20:35:20,521 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-16T20:35:20,526 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp/info/3d1680104948475c8ba019f426d37fc5 is 1080, key is row0029/info:/1731789314413/Put/seqid=0 2024-11-16T20:35:20,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741850_1026 (size=8193) 2024-11-16T20:35:20,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741850_1026 (size=8193) 2024-11-16T20:35:20,535 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp/info/3d1680104948475c8ba019f426d37fc5 2024-11-16T20:35:20,542 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740/.tmp/info/cb7fba4656bf411cbd7423880a1a64fa is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40./info:regioninfo/1731789228280/Put/seqid=0 2024-11-16T20:35:20,545 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/.tmp/info/3d1680104948475c8ba019f426d37fc5 as hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/3d1680104948475c8ba019f426d37fc5 2024-11-16T20:35:20,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741851_1027 (size=7016) 2024-11-16T20:35:20,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741851_1027 (size=7016) 2024-11-16T20:35:20,552 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740/.tmp/info/cb7fba4656bf411cbd7423880a1a64fa 2024-11-16T20:35:20,554 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/3d1680104948475c8ba019f426d37fc5, entries=3, sequenceid=48, filesize=8.0 K 2024-11-16T20:35:20,556 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for ae4cd1d3889650ff27c29755dacfee40 in 36ms, sequenceid=48, compaction requested=true 2024-11-16T20:35:20,556 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/ed13fa78cfd040a3ab6bbb77231237c9, hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/8862305dc79a44d5a7dbbaab22550c57, hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/87e7c0b2c59447109bd9071e34e4ecce] to archive 2024-11-16T20:35:20,560 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
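The flush path visible here writes the new HFile under the region's .tmp directory ("Flushed memstore ... to=.../.tmp/info/...") and then commits it by moving it into the column-family directory ("Committing .tmp/info/... as .../info/..."), so readers never observe a half-written store file. A minimal sketch of that write-then-rename commit using Hadoop's FileSystem API is shown below; the paths are illustrative and this is not HRegionFileSystem itself.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FlushCommitSketch {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        // Illustrative paths mirroring the .tmp -> info layout in the log above.
        Path tmpFile = new Path("/data/default/SomeTable/region/.tmp/info/flushedfile");
        Path storeFile = new Path("/data/default/SomeTable/region/info/flushedfile");
        // The flushed file is fully written under .tmp first, then moved into
        // the store directory in one rename so it becomes visible atomically.
        if (!fs.rename(tmpFile, storeFile)) {
          throw new IOException("failed to commit flushed file " + tmpFile);
        }
      }
    }
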
2024-11-16T20:35:20,564 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/ed13fa78cfd040a3ab6bbb77231237c9 to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/ed13fa78cfd040a3ab6bbb77231237c9 2024-11-16T20:35:20,566 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/8862305dc79a44d5a7dbbaab22550c57 to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/8862305dc79a44d5a7dbbaab22550c57 2024-11-16T20:35:20,568 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/87e7c0b2c59447109bd9071e34e4ecce to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/info/87e7c0b2c59447109bd9071e34e4ecce 2024-11-16T20:35:20,578 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740/.tmp/ns/33f9646fcdd44c70ae04c25698378ff0 is 43, key is default/ns:d/1731789227586/Put/seqid=0 2024-11-16T20:35:20,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741852_1028 (size=5153) 2024-11-16T20:35:20,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741852_1028 (size=5153) 2024-11-16T20:35:20,585 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740/.tmp/ns/33f9646fcdd44c70ae04c25698378ff0 2024-11-16T20:35:20,581 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=40c018648b21:34669 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-16T20:35:20,586 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [ed13fa78cfd040a3ab6bbb77231237c9=12509, 8862305dc79a44d5a7dbbaab22550c57=12509, 87e7c0b2c59447109bd9071e34e4ecce=12509] 2024-11-16T20:35:20,592 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/default/TestLogRolling-testSlowSyncLogRolling/ae4cd1d3889650ff27c29755dacfee40/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-16T20:35:20,594 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40. 2024-11-16T20:35:20,594 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ae4cd1d3889650ff27c29755dacfee40: Waiting for close lock at 1731789320518Running coprocessor pre-close hooks at 1731789320518Disabling compacts and flushes for region at 1731789320518Disabling writes for close at 1731789320519 (+1 ms)Obtaining lock to block concurrent updates at 1731789320520 (+1 ms)Preparing flush snapshotting stores in ae4cd1d3889650ff27c29755dacfee40 at 1731789320520Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731789320520Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40. at 1731789320522 (+2 ms)Flushing ae4cd1d3889650ff27c29755dacfee40/info: creating writer at 1731789320522Flushing ae4cd1d3889650ff27c29755dacfee40/info: appending metadata at 1731789320526 (+4 ms)Flushing ae4cd1d3889650ff27c29755dacfee40/info: closing flushed file at 1731789320526Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a6eeab6: reopening flushed file at 1731789320544 (+18 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for ae4cd1d3889650ff27c29755dacfee40 in 36ms, sequenceid=48, compaction requested=true at 1731789320556 (+12 ms)Writing region close event to WAL at 1731789320587 (+31 ms)Running coprocessor post-close hooks at 1731789320592 (+5 ms)Closed at 1731789320594 (+2 ms) 2024-11-16T20:35:20,594 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731789227798.ae4cd1d3889650ff27c29755dacfee40. 
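The StoppedRpcClientException above is a shutdown-ordering artifact rather than a network failure: the region server has already stopped its shared RPC client ("Stopping rpc client") by the time the store-close path tries to report the archived files to the master, so the call fails locally and is only logged as something that "will be retried". A toy illustration of that failure mode, with made-up class names, is:

    public class StoppedRpcClientSketch {
      static class StoppedClientException extends RuntimeException {
        StoppedClientException(String msg) { super(msg); }
      }

      static class RpcClient {
        private volatile boolean stopped = false;
        void stop() { stopped = true; }
        void call(String method) {
          if (stopped) {
            // Fails before any network I/O, mirroring the "local exception" above.
            throw new StoppedClientException("client already stopped: " + method);
          }
          // ... perform the remote call ...
        }
      }

      public static void main(String[] args) {
        RpcClient client = new RpcClient();
        client.stop(); // shutdown stops the shared client first
        try {
          client.call("reportFileArchival");
        } catch (StoppedClientException e) {
          // Matches the log: the report is only warned about and deferred.
          System.out.println("Failed to report file archival(s), will be retried: " + e.getMessage());
        }
      }
    }
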
2024-11-16T20:35:20,612 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740/.tmp/table/f61bb6b2d31e471aa0fa957357e4fee5 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731789228296/Put/seqid=0 2024-11-16T20:35:20,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741853_1029 (size=5396) 2024-11-16T20:35:20,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741853_1029 (size=5396) 2024-11-16T20:35:20,619 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740/.tmp/table/f61bb6b2d31e471aa0fa957357e4fee5 2024-11-16T20:35:20,630 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740/.tmp/info/cb7fba4656bf411cbd7423880a1a64fa as hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740/info/cb7fba4656bf411cbd7423880a1a64fa 2024-11-16T20:35:20,639 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740/info/cb7fba4656bf411cbd7423880a1a64fa, entries=10, sequenceid=11, filesize=6.9 K 2024-11-16T20:35:20,640 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740/.tmp/ns/33f9646fcdd44c70ae04c25698378ff0 as hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740/ns/33f9646fcdd44c70ae04c25698378ff0 2024-11-16T20:35:20,649 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740/ns/33f9646fcdd44c70ae04c25698378ff0, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T20:35:20,651 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740/.tmp/table/f61bb6b2d31e471aa0fa957357e4fee5 as hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740/table/f61bb6b2d31e471aa0fa957357e4fee5 2024-11-16T20:35:20,660 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740/table/f61bb6b2d31e471aa0fa957357e4fee5, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T20:35:20,661 INFO 
[RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 141ms, sequenceid=11, compaction requested=false 2024-11-16T20:35:20,667 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T20:35:20,668 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T20:35:20,668 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T20:35:20,668 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731789320520Running coprocessor pre-close hooks at 1731789320520Disabling compacts and flushes for region at 1731789320520Disabling writes for close at 1731789320520Obtaining lock to block concurrent updates at 1731789320521 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731789320521Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731789320521Flushing stores of hbase:meta,,1.1588230740 at 1731789320523 (+2 ms)Flushing 1588230740/info: creating writer at 1731789320523Flushing 1588230740/info: appending metadata at 1731789320542 (+19 ms)Flushing 1588230740/info: closing flushed file at 1731789320542Flushing 1588230740/ns: creating writer at 1731789320560 (+18 ms)Flushing 1588230740/ns: appending metadata at 1731789320577 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731789320577Flushing 1588230740/table: creating writer at 1731789320594 (+17 ms)Flushing 1588230740/table: appending metadata at 1731789320611 (+17 ms)Flushing 1588230740/table: closing flushed file at 1731789320611Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f39d9b0: reopening flushed file at 1731789320628 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cc36715: reopening flushed file at 1731789320639 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6dfd452c: reopening flushed file at 1731789320649 (+10 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 141ms, sequenceid=11, compaction requested=false at 1731789320661 (+12 ms)Writing region close event to WAL at 1731789320662 (+1 ms)Running coprocessor post-close hooks at 1731789320667 (+5 ms)Closed at 1731789320668 (+1 ms) 2024-11-16T20:35:20,668 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T20:35:20,721 INFO [RS:0;40c018648b21:41609 {}] regionserver.HRegionServer(976): stopping server 40c018648b21,41609,1731789225506; all regions closed. 
2024-11-16T20:35:20,724 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:20,725 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:20,725 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:20,725 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:20,726 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:20,727 INFO [regionserver/40c018648b21:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T20:35:20,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741834_1010 (size=3066) 2024-11-16T20:35:20,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741834_1010 (size=3066) 2024-11-16T20:35:20,734 DEBUG [RS:0;40c018648b21:41609 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/oldWALs 2024-11-16T20:35:20,734 INFO [RS:0;40c018648b21:41609 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 40c018648b21%2C41609%2C1731789225506.meta:.meta(num 1731789227380) 2024-11-16T20:35:20,735 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:20,735 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:20,735 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:20,735 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:20,736 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:20,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741847_1023 (size=12695) 2024-11-16T20:35:20,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741847_1023 (size=12695) 2024-11-16T20:35:20,759 INFO [regionserver/40c018648b21:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T20:35:20,759 INFO [regionserver/40c018648b21:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T20:35:21,144 DEBUG [RS:0;40c018648b21:41609 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/oldWALs 2024-11-16T20:35:21,144 INFO [RS:0;40c018648b21:41609 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 40c018648b21%2C41609%2C1731789225506:(num 1731789300381) 2024-11-16T20:35:21,144 DEBUG [RS:0;40c018648b21:41609 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:35:21,144 INFO [RS:0;40c018648b21:41609 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T20:35:21,144 INFO [RS:0;40c018648b21:41609 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T20:35:21,145 INFO [RS:0;40c018648b21:41609 {}] hbase.ChoreService(370): Chore service for: regionserver/40c018648b21:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T20:35:21,145 INFO [RS:0;40c018648b21:41609 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T20:35:21,145 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
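The repeated "sync.N ... interrupted" lines mark the WAL's sync runner threads being woken out of their blocking queue as each FSHLog instance is closed; the thread dump later in this run shows the same runners parked in FSHLog$SyncRunner.takeSyncRequest on a LinkedBlockingQueue. A generic sketch of that consumer loop and its interrupt-driven exit follows; it illustrates the pattern only and is not FSHLog's code.

    import java.util.concurrent.LinkedBlockingQueue;

    public class SyncRunnerSketch implements Runnable {
      private final LinkedBlockingQueue<Runnable> syncRequests = new LinkedBlockingQueue<>();

      @Override
      public void run() {
        while (!Thread.currentThread().isInterrupted()) {
          try {
            // Blocks until a sync request arrives, like FSHLog$SyncRunner.takeSyncRequest.
            Runnable sync = syncRequests.take();
            sync.run();
          } catch (InterruptedException e) {
            // WAL close interrupts the runner; it logs "interrupted" and exits.
            Thread.currentThread().interrupt();
            return;
          }
        }
      }

      public static void main(String[] args) throws InterruptedException {
        Thread runner = new Thread(new SyncRunnerSketch(), "sync.0");
        runner.start();
        runner.interrupt(); // what closing the WAL effectively does to each runner
        runner.join();
      }
    }
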
2024-11-16T20:35:21,146 INFO [RS:0;40c018648b21:41609 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41609 2024-11-16T20:35:21,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/40c018648b21,41609,1731789225506 2024-11-16T20:35:21,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T20:35:21,208 INFO [RS:0;40c018648b21:41609 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T20:35:21,211 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [40c018648b21,41609,1731789225506] 2024-11-16T20:35:21,228 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/40c018648b21,41609,1731789225506 already deleted, retry=false 2024-11-16T20:35:21,228 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 40c018648b21,41609,1731789225506 expired; onlineServers=0 2024-11-16T20:35:21,228 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '40c018648b21,34669,1731789224801' ***** 2024-11-16T20:35:21,228 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T20:35:21,228 INFO [M:0;40c018648b21:34669 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T20:35:21,229 INFO [M:0;40c018648b21:34669 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T20:35:21,229 DEBUG [M:0;40c018648b21:34669 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T20:35:21,229 DEBUG [M:0;40c018648b21:34669 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T20:35:21,229 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
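The ZKWatcher lines in this section show the coordination mechanism at work: deleting /hbase/running signals cluster shutdown to every server, and once the region server's ephemeral znode under /hbase/rs disappears, RegionServerTracker treats the server as expired and, with onlineServers=0 and cluster shutdown set, stops the master. A minimal ZooKeeper-client sketch of watching a znode for deletion is below; the connect string and path are copied from the log purely for illustration, and this is not HBase's ZKWatcher.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeDeletionWatchSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
            // Deletion of the watched znode is treated as a shutdown/expiration signal.
            System.out.println("znode deleted: " + event.getPath());
          }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60663", 30000, watcher);
        // exists() registers the default watcher even when the node is absent,
        // matching the "Set watcher on znode that does not yet exist" debug lines.
        zk.exists("/hbase/running", true);
        // A real tracker keeps the session open and re-registers the watch on each event.
      }
    }
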
2024-11-16T20:35:21,229 DEBUG [master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789226590 {}] cleaner.HFileCleaner(306): Exit Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789226590,5,FailOnTimeoutGroup] 2024-11-16T20:35:21,229 DEBUG [master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789226593 {}] cleaner.HFileCleaner(306): Exit Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789226593,5,FailOnTimeoutGroup] 2024-11-16T20:35:21,229 INFO [M:0;40c018648b21:34669 {}] hbase.ChoreService(370): Chore service for: master/40c018648b21:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T20:35:21,230 INFO [M:0;40c018648b21:34669 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T20:35:21,230 DEBUG [M:0;40c018648b21:34669 {}] master.HMaster(1795): Stopping service threads 2024-11-16T20:35:21,230 INFO [M:0;40c018648b21:34669 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T20:35:21,230 INFO [M:0;40c018648b21:34669 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T20:35:21,231 INFO [M:0;40c018648b21:34669 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T20:35:21,231 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T20:35:21,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T20:35:21,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:21,239 DEBUG [M:0;40c018648b21:34669 {}] zookeeper.ZKUtil(347): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T20:35:21,239 WARN [M:0;40c018648b21:34669 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T20:35:21,240 INFO [M:0;40c018648b21:34669 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/.lastflushedseqids 2024-11-16T20:35:21,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741854_1030 (size=130) 2024-11-16T20:35:21,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741854_1030 (size=130) 2024-11-16T20:35:21,258 INFO [M:0;40c018648b21:34669 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T20:35:21,258 INFO [M:0;40c018648b21:34669 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T20:35:21,258 DEBUG [M:0;40c018648b21:34669 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T20:35:21,258 INFO [M:0;40c018648b21:34669 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:35:21,258 DEBUG [M:0;40c018648b21:34669 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:35:21,258 DEBUG [M:0;40c018648b21:34669 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T20:35:21,258 DEBUG [M:0;40c018648b21:34669 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:35:21,259 INFO [M:0;40c018648b21:34669 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-16T20:35:21,277 DEBUG [M:0;40c018648b21:34669 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/25aa8c1a59f84621a0b1efdeb0508902 is 82, key is hbase:meta,,1/info:regioninfo/1731789227449/Put/seqid=0 2024-11-16T20:35:21,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741855_1031 (size=5672) 2024-11-16T20:35:21,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741855_1031 (size=5672) 2024-11-16T20:35:21,284 INFO [M:0;40c018648b21:34669 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/25aa8c1a59f84621a0b1efdeb0508902 2024-11-16T20:35:21,310 DEBUG [M:0;40c018648b21:34669 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a4d80fa6ece54a33a0f1a04bd0f55e92 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731789228303/Put/seqid=0 2024-11-16T20:35:21,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741856_1032 (size=6248) 2024-11-16T20:35:21,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741856_1032 (size=6248) 2024-11-16T20:35:21,317 INFO [M:0;40c018648b21:34669 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a4d80fa6ece54a33a0f1a04bd0f55e92 2024-11-16T20:35:21,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:35:21,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x101455a73c80001, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:35:21,318 INFO [RS:0;40c018648b21:41609 {}] hbase.HBaseServerBase(486): Close table descriptors 
2024-11-16T20:35:21,318 INFO [RS:0;40c018648b21:41609 {}] regionserver.HRegionServer(1031): Exiting; stopping=40c018648b21,41609,1731789225506; zookeeper connection closed. 2024-11-16T20:35:21,318 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@69be8b0f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@69be8b0f 2024-11-16T20:35:21,319 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T20:35:21,323 INFO [M:0;40c018648b21:34669 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a4d80fa6ece54a33a0f1a04bd0f55e92 2024-11-16T20:35:21,344 DEBUG [M:0;40c018648b21:34669 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/702c17cddf154599abb0e9f303014dcc is 69, key is 40c018648b21,41609,1731789225506/rs:state/1731789226644/Put/seqid=0 2024-11-16T20:35:21,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741857_1033 (size=5156) 2024-11-16T20:35:21,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741857_1033 (size=5156) 2024-11-16T20:35:21,352 INFO [M:0;40c018648b21:34669 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/702c17cddf154599abb0e9f303014dcc 2024-11-16T20:35:21,381 DEBUG [M:0;40c018648b21:34669 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6ea50fe961b0423bbc208aaeddc4dbba is 52, key is load_balancer_on/state:d/1731789227772/Put/seqid=0 2024-11-16T20:35:21,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741858_1034 (size=5056) 2024-11-16T20:35:21,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741858_1034 (size=5056) 2024-11-16T20:35:21,388 INFO [M:0;40c018648b21:34669 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6ea50fe961b0423bbc208aaeddc4dbba 2024-11-16T20:35:21,396 DEBUG [M:0;40c018648b21:34669 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/25aa8c1a59f84621a0b1efdeb0508902 as hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/25aa8c1a59f84621a0b1efdeb0508902 2024-11-16T20:35:21,403 INFO [M:0;40c018648b21:34669 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/25aa8c1a59f84621a0b1efdeb0508902, entries=8, sequenceid=59, filesize=5.5 K 2024-11-16T20:35:21,404 DEBUG [M:0;40c018648b21:34669 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a4d80fa6ece54a33a0f1a04bd0f55e92 as hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a4d80fa6ece54a33a0f1a04bd0f55e92 2024-11-16T20:35:21,411 INFO [M:0;40c018648b21:34669 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a4d80fa6ece54a33a0f1a04bd0f55e92 2024-11-16T20:35:21,412 INFO [M:0;40c018648b21:34669 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a4d80fa6ece54a33a0f1a04bd0f55e92, entries=6, sequenceid=59, filesize=6.1 K 2024-11-16T20:35:21,413 DEBUG [M:0;40c018648b21:34669 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/702c17cddf154599abb0e9f303014dcc as hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/702c17cddf154599abb0e9f303014dcc 2024-11-16T20:35:21,420 INFO [M:0;40c018648b21:34669 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/702c17cddf154599abb0e9f303014dcc, entries=1, sequenceid=59, filesize=5.0 K 2024-11-16T20:35:21,422 DEBUG [M:0;40c018648b21:34669 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6ea50fe961b0423bbc208aaeddc4dbba as hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6ea50fe961b0423bbc208aaeddc4dbba 2024-11-16T20:35:21,428 INFO [M:0;40c018648b21:34669 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6ea50fe961b0423bbc208aaeddc4dbba, entries=1, sequenceid=59, filesize=4.9 K 2024-11-16T20:35:21,430 INFO [M:0;40c018648b21:34669 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 172ms, sequenceid=59, compaction requested=false 2024-11-16T20:35:21,431 INFO [M:0;40c018648b21:34669 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T20:35:21,431 DEBUG [M:0;40c018648b21:34669 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731789321258Disabling compacts and flushes for region at 1731789321258Disabling writes for close at 1731789321258Obtaining lock to block concurrent updates at 1731789321259 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731789321259Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1731789321259Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731789321260 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731789321260Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731789321277 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731789321277Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731789321293 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731789321309 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731789321309Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731789321323 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731789321343 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731789321343Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731789321361 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731789321380 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731789321380Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2dc7a6ca: reopening flushed file at 1731789321394 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ec00cde: reopening flushed file at 1731789321403 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@22efe08d: reopening flushed file at 1731789321412 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c65c411: reopening flushed file at 1731789321421 (+9 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 172ms, sequenceid=59, compaction requested=false at 1731789321430 (+9 ms)Writing region close event to WAL at 1731789321431 (+1 ms)Closed at 1731789321431 2024-11-16T20:35:21,432 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:21,432 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:21,433 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:21,433 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:21,433 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:21,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741830_1006 (size=27985) 2024-11-16T20:35:21,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41759 is added to blk_1073741830_1006 (size=27985) 2024-11-16T20:35:21,436 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T20:35:21,436 INFO [M:0;40c018648b21:34669 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T20:35:21,436 INFO [M:0;40c018648b21:34669 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34669 2024-11-16T20:35:21,436 INFO [M:0;40c018648b21:34669 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T20:35:21,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:35:21,549 INFO [M:0;40c018648b21:34669 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T20:35:21,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34669-0x101455a73c80000, quorum=127.0.0.1:60663, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:35:21,555 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1467625d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:35:21,557 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@675921ed{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:35:21,557 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:35:21,558 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ec7bf2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:35:21,558 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3369fbc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/hadoop.log.dir/,STOPPED} 2024-11-16T20:35:21,561 WARN [BP-1072893873-172.17.0.2-1731789220193 heartbeating to localhost/127.0.0.1:38761 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:35:21,561 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T20:35:21,561 WARN [BP-1072893873-172.17.0.2-1731789220193 heartbeating to localhost/127.0.0.1:38761 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1072893873-172.17.0.2-1731789220193 (Datanode Uuid c774aad7-0834-41ec-a4bb-98fd9c87ad13) service to localhost/127.0.0.1:38761 2024-11-16T20:35:21,561 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:35:21,562 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/cluster_e01d7346-7a30-214e-8c60-80b6047028f5/data/data3/current/BP-1072893873-172.17.0.2-1731789220193 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:35:21,562 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/cluster_e01d7346-7a30-214e-8c60-80b6047028f5/data/data4/current/BP-1072893873-172.17.0.2-1731789220193 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:35:21,563 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:35:21,565 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6c2fdbac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:35:21,565 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@461c65fd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:35:21,565 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:35:21,566 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d13ec7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:35:21,566 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6355b7f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/hadoop.log.dir/,STOPPED} 2024-11-16T20:35:21,567 WARN [BP-1072893873-172.17.0.2-1731789220193 heartbeating to localhost/127.0.0.1:38761 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:35:21,567 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T20:35:21,567 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:35:21,567 WARN [BP-1072893873-172.17.0.2-1731789220193 heartbeating to localhost/127.0.0.1:38761 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1072893873-172.17.0.2-1731789220193 (Datanode Uuid 481a69a6-83e4-400d-b7e6-ca0715204381) service to localhost/127.0.0.1:38761 2024-11-16T20:35:21,568 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/cluster_e01d7346-7a30-214e-8c60-80b6047028f5/data/data1/current/BP-1072893873-172.17.0.2-1731789220193 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:35:21,568 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/cluster_e01d7346-7a30-214e-8c60-80b6047028f5/data/data2/current/BP-1072893873-172.17.0.2-1731789220193 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:35:21,568 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:35:21,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T20:35:21,578 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:35:21,578 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:35:21,579 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:35:21,579 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/hadoop.log.dir/,STOPPED} 2024-11-16T20:35:21,587 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T20:35:21,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T20:35:21,630 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=81 (was 12) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:38761 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:38761 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially 
hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38761 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:38761 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38761 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/40c018648b21:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38761 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@5f2d9ff8 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:38761 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38761 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/40c018648b21:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/40c018648b21:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=200 (was 125) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=5041 (was 5442) 2024-11-16T20:35:21,637 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=82, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=200, ProcessCount=11, AvailableMemoryMB=5040 2024-11-16T20:35:21,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T20:35:21,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/hadoop.log.dir so I do NOT create it in target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f 2024-11-16T20:35:21,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2978a67c-c3f1-d077-7f16-9a78bc3638c9/hadoop.tmp.dir so I do NOT create it in target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f 2024-11-16T20:35:21,637 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/cluster_c4982f83-97c5-5372-4e5b-bd31ac7c1668, deleteOnExit=true 2024-11-16T20:35:21,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T20:35:21,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/test.cache.data in system properties and HBase conf 2024-11-16T20:35:21,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T20:35:21,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/hadoop.log.dir in system properties and HBase conf 2024-11-16T20:35:21,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T20:35:21,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T20:35:21,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T20:35:21,638 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T20:35:21,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T20:35:21,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T20:35:21,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T20:35:21,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T20:35:21,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T20:35:21,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T20:35:21,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T20:35:21,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T20:35:21,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T20:35:21,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/nfs.dump.dir in system properties and HBase conf 2024-11-16T20:35:21,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/java.io.tmpdir in system properties and HBase conf 2024-11-16T20:35:21,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T20:35:21,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T20:35:21,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T20:35:21,653 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T20:35:21,995 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:35:22,001 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:35:22,005 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:35:22,005 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:35:22,005 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T20:35:22,006 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:35:22,006 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65506a11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:35:22,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75cbfab9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:35:22,109 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@493d1d34{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/java.io.tmpdir/jetty-localhost-43517-hadoop-hdfs-3_4_1-tests_jar-_-any-8147765734394141358/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T20:35:22,110 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6a249094{HTTP/1.1, (http/1.1)}{localhost:43517} 2024-11-16T20:35:22,110 INFO [Time-limited test {}] server.Server(415): Started @103942ms 2024-11-16T20:35:22,124 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T20:35:22,368 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:35:22,372 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:35:22,373 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:35:22,373 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:35:22,374 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:35:22,374 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c8914e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:35:22,374 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aa9354f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:35:22,471 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52b07bdb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/java.io.tmpdir/jetty-localhost-37377-hadoop-hdfs-3_4_1-tests_jar-_-any-9604610578809685524/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:35:22,471 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@70121b28{HTTP/1.1, (http/1.1)}{localhost:37377} 2024-11-16T20:35:22,471 INFO [Time-limited test {}] server.Server(415): Started @104304ms 2024-11-16T20:35:22,473 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:35:22,509 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:35:22,513 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:35:22,514 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:35:22,514 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:35:22,514 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:35:22,515 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b915b67{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:35:22,515 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bc8c098{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:35:22,614 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f5c23ef{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/java.io.tmpdir/jetty-localhost-42521-hadoop-hdfs-3_4_1-tests_jar-_-any-10667288671131970060/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:35:22,614 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@716c7b87{HTTP/1.1, (http/1.1)}{localhost:42521} 2024-11-16T20:35:22,615 INFO [Time-limited test {}] server.Server(415): Started @104447ms 2024-11-16T20:35:22,616 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:35:23,716 WARN [Thread-452 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/cluster_c4982f83-97c5-5372-4e5b-bd31ac7c1668/data/data1/current/BP-1039840100-172.17.0.2-1731789321665/current, will proceed with Du for space computation calculation, 2024-11-16T20:35:23,716 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/cluster_c4982f83-97c5-5372-4e5b-bd31ac7c1668/data/data2/current/BP-1039840100-172.17.0.2-1731789321665/current, will proceed with Du for space computation calculation, 2024-11-16T20:35:23,742 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T20:35:23,744 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x74c016792e829f6f with lease ID 0xc93e0c989bf9b6c2: Processing first storage report for DS-864189a7-a1df-4818-8686-88df34f57227 from datanode DatanodeRegistration(127.0.0.1:32989, datanodeUuid=b6d9c285-aa1b-4279-8558-73ae5dbfe6fe, infoPort=43281, infoSecurePort=0, ipcPort=33081, storageInfo=lv=-57;cid=testClusterID;nsid=1744788723;c=1731789321665) 2024-11-16T20:35:23,744 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x74c016792e829f6f with lease ID 0xc93e0c989bf9b6c2: from storage DS-864189a7-a1df-4818-8686-88df34f57227 node DatanodeRegistration(127.0.0.1:32989, datanodeUuid=b6d9c285-aa1b-4279-8558-73ae5dbfe6fe, infoPort=43281, infoSecurePort=0, ipcPort=33081, storageInfo=lv=-57;cid=testClusterID;nsid=1744788723;c=1731789321665), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:35:23,745 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x74c016792e829f6f with lease ID 0xc93e0c989bf9b6c2: Processing first storage report for DS-eedc07ae-469e-4130-92a4-831d66f94303 from datanode DatanodeRegistration(127.0.0.1:32989, datanodeUuid=b6d9c285-aa1b-4279-8558-73ae5dbfe6fe, infoPort=43281, infoSecurePort=0, ipcPort=33081, storageInfo=lv=-57;cid=testClusterID;nsid=1744788723;c=1731789321665) 2024-11-16T20:35:23,745 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x74c016792e829f6f with lease ID 0xc93e0c989bf9b6c2: from storage DS-eedc07ae-469e-4130-92a4-831d66f94303 node DatanodeRegistration(127.0.0.1:32989, datanodeUuid=b6d9c285-aa1b-4279-8558-73ae5dbfe6fe, infoPort=43281, infoSecurePort=0, ipcPort=33081, storageInfo=lv=-57;cid=testClusterID;nsid=1744788723;c=1731789321665), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:35:23,837 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/cluster_c4982f83-97c5-5372-4e5b-bd31ac7c1668/data/data4/current/BP-1039840100-172.17.0.2-1731789321665/current, will proceed with Du for space computation calculation, 2024-11-16T20:35:23,837 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/cluster_c4982f83-97c5-5372-4e5b-bd31ac7c1668/data/data3/current/BP-1039840100-172.17.0.2-1731789321665/current, will proceed with Du for space computation calculation, 2024-11-16T20:35:23,857 WARN [Thread-439 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T20:35:23,859 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcc6c3839e5ec2ae7 with lease ID 0xc93e0c989bf9b6c3: Processing first storage report for DS-b775021e-44f8-4850-8ba6-c3c80f05a4d8 from datanode DatanodeRegistration(127.0.0.1:35369, datanodeUuid=734540b4-b0d9-49ab-a452-596c3799a21d, infoPort=43693, infoSecurePort=0, ipcPort=45995, storageInfo=lv=-57;cid=testClusterID;nsid=1744788723;c=1731789321665) 2024-11-16T20:35:23,859 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc6c3839e5ec2ae7 with lease ID 0xc93e0c989bf9b6c3: from storage DS-b775021e-44f8-4850-8ba6-c3c80f05a4d8 node DatanodeRegistration(127.0.0.1:35369, datanodeUuid=734540b4-b0d9-49ab-a452-596c3799a21d, infoPort=43693, infoSecurePort=0, ipcPort=45995, storageInfo=lv=-57;cid=testClusterID;nsid=1744788723;c=1731789321665), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:35:23,859 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcc6c3839e5ec2ae7 with lease ID 0xc93e0c989bf9b6c3: Processing first storage report for DS-b804478f-e8a1-4d4f-bb46-e0f8a8d06063 from datanode DatanodeRegistration(127.0.0.1:35369, datanodeUuid=734540b4-b0d9-49ab-a452-596c3799a21d, infoPort=43693, infoSecurePort=0, ipcPort=45995, storageInfo=lv=-57;cid=testClusterID;nsid=1744788723;c=1731789321665) 2024-11-16T20:35:23,859 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc6c3839e5ec2ae7 with lease ID 0xc93e0c989bf9b6c3: from storage DS-b804478f-e8a1-4d4f-bb46-e0f8a8d06063 node DatanodeRegistration(127.0.0.1:35369, datanodeUuid=734540b4-b0d9-49ab-a452-596c3799a21d, infoPort=43693, infoSecurePort=0, ipcPort=45995, storageInfo=lv=-57;cid=testClusterID;nsid=1744788723;c=1731789321665), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:35:23,965 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f 2024-11-16T20:35:23,972 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/cluster_c4982f83-97c5-5372-4e5b-bd31ac7c1668/zookeeper_0, clientPort=52732, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/cluster_c4982f83-97c5-5372-4e5b-bd31ac7c1668/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/cluster_c4982f83-97c5-5372-4e5b-bd31ac7c1668/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T20:35:23,973 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52732 2024-11-16T20:35:23,973 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:23,975 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:23,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741825_1001 (size=7) 2024-11-16T20:35:23,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32989 is added to blk_1073741825_1001 (size=7) 2024-11-16T20:35:23,985 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8 with version=8 2024-11-16T20:35:23,986 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/hbase-staging 2024-11-16T20:35:23,988 INFO [Time-limited test {}] client.ConnectionUtils(128): master/40c018648b21:0 server-side Connection retries=45 2024-11-16T20:35:23,988 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:35:23,988 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T20:35:23,988 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T20:35:23,988 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:35:23,988 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T20:35:23,989 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T20:35:23,989 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T20:35:23,989 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42909 2024-11-16T20:35:23,991 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42909 connecting to ZooKeeper ensemble=127.0.0.1:52732 2024-11-16T20:35:24,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:429090x0, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T20:35:24,087 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42909-0x101455bfa6d0000 connected 2024-11-16T20:35:24,172 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:24,174 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:24,177 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:35:24,178 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8, hbase.cluster.distributed=false 2024-11-16T20:35:24,180 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T20:35:24,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42909 2024-11-16T20:35:24,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42909 2024-11-16T20:35:24,181 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42909 2024-11-16T20:35:24,181 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42909 2024-11-16T20:35:24,181 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42909 2024-11-16T20:35:24,196 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/40c018648b21:0 server-side Connection retries=45 2024-11-16T20:35:24,196 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:35:24,196 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T20:35:24,196 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T20:35:24,197 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:35:24,197 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T20:35:24,197 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T20:35:24,197 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T20:35:24,197 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44015 2024-11-16T20:35:24,199 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44015 connecting to ZooKeeper ensemble=127.0.0.1:52732 2024-11-16T20:35:24,200 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:24,202 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:24,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:440150x0, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T20:35:24,214 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44015-0x101455bfa6d0001 connected 2024-11-16T20:35:24,214 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:35:24,215 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T20:35:24,215 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T20:35:24,216 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T20:35:24,217 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T20:35:24,219 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44015 2024-11-16T20:35:24,219 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44015 2024-11-16T20:35:24,220 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44015 2024-11-16T20:35:24,220 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44015 2024-11-16T20:35:24,220 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44015 2024-11-16T20:35:24,237 DEBUG [M:0;40c018648b21:42909 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;40c018648b21:42909 2024-11-16T20:35:24,238 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/40c018648b21,42909,1731789323988 2024-11-16T20:35:24,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:35:24,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:35:24,246 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/40c018648b21,42909,1731789323988 2024-11-16T20:35:24,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:24,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T20:35:24,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:24,257 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T20:35:24,258 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/40c018648b21,42909,1731789323988 from backup master directory 2024-11-16T20:35:24,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/40c018648b21,42909,1731789323988 2024-11-16T20:35:24,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:35:24,267 WARN [master/40c018648b21:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
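The lines above record HBaseTestingUtil bringing up a mini DFS, a MiniZooKeeperCluster on client port 52732, and an HMaster (port 42909) registering itself under /hbase/backup-masters before becoming the active master. In a test, that whole sequence is typically triggered by a single mini-cluster start call; the following is a minimal sketch under that assumption (the startMiniCluster/shutdownMiniCluster calls and the class name MiniClusterSketch are illustrative and not taken from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        Configuration conf = util.getConfiguration(); // the conf the log shows being populated with test dirs
        util.startMiniCluster();                      // assumed API: starts mini DFS, ZooKeeper, master, regionserver
        try {
          // e.g. inspect where the cluster keeps its znodes
          System.out.println(conf.get("zookeeper.znode.parent", "/hbase"));
        } finally {
          util.shutdownMiniCluster();                 // tear the mini cluster down again
        }
      }
    }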
2024-11-16T20:35:24,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:35:24,267 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=40c018648b21,42909,1731789323988 2024-11-16T20:35:24,273 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/hbase.id] with ID: a41b6807-3a02-4e31-ab2b-fb85ae7c7682 2024-11-16T20:35:24,273 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/.tmp/hbase.id 2024-11-16T20:35:24,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32989 is added to blk_1073741826_1002 (size=42) 2024-11-16T20:35:24,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741826_1002 (size=42) 2024-11-16T20:35:24,281 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/.tmp/hbase.id]:[hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/hbase.id] 2024-11-16T20:35:24,297 INFO [master/40c018648b21:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:24,297 INFO [master/40c018648b21:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T20:35:24,298 INFO [master/40c018648b21:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
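As logged, the cluster ID is first written to .tmp/hbase.id and then moved to hbase.id, so a reader never observes a half-written file. A rough sketch of that write-then-rename pattern with the plain Hadoop FileSystem API (the helper name and paths are hypothetical, not the test's actual directories, and the real FSUtils code does more than this):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ClusterIdSketch {
      // Write clusterId under rootDir/.tmp/hbase.id, then rename into rootDir/hbase.id.
      public static void writeClusterId(FileSystem fs, Path rootDir, String clusterId) throws IOException {
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path target = new Path(rootDir, "hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // Readers see either the old hbase.id or the complete new one, never a partial write.
        if (!fs.rename(tmp, target)) {
          throw new IOException("rename " + tmp + " -> " + target + " failed");
        }
      }
    }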
2024-11-16T20:35:24,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:24,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:24,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741827_1003 (size=196) 2024-11-16T20:35:24,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32989 is added to blk_1073741827_1003 (size=196) 2024-11-16T20:35:24,319 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T20:35:24,320 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T20:35:24,321 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:35:24,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741828_1004 (size=1189) 2024-11-16T20:35:24,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32989 is added to blk_1073741828_1004 (size=1189) 2024-11-16T20:35:24,329 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store 2024-11-16T20:35:24,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32989 is added to blk_1073741829_1005 (size=34) 2024-11-16T20:35:24,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741829_1005 (size=34) 2024-11-16T20:35:24,337 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:35:24,337 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T20:35:24,337 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:35:24,338 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:35:24,338 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T20:35:24,338 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:35:24,338 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
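The 'master:store' descriptor logged above (an in-memory 'info' family with three versions, ROW_INDEX_V1 encoding, a ROWCOL bloom filter and 8 KB blocks, plus single-version 'proc', 'rs' and 'state' families) can be expressed with the public descriptor builders. This is a hedged sketch of an equivalent schema, not the code HBase itself runs, and the table name is made up:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class LocalStoreSchemaSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_local_store")) // hypothetical name
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1).setBloomFilterType(BloomType.ROW).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs"))
                .setMaxVersions(1).setBloomFilterType(BloomType.ROW).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state"))
                .setMaxVersions(1).setBloomFilterType(BloomType.ROW).build())
            .build();
      }
    }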
2024-11-16T20:35:24,338 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731789324337Disabling compacts and flushes for region at 1731789324337Disabling writes for close at 1731789324338 (+1 ms)Writing region close event to WAL at 1731789324338Closed at 1731789324338 2024-11-16T20:35:24,339 WARN [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/.initializing 2024-11-16T20:35:24,339 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/WALs/40c018648b21,42909,1731789323988 2024-11-16T20:35:24,343 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C42909%2C1731789323988, suffix=, logDir=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/WALs/40c018648b21,42909,1731789323988, archiveDir=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/oldWALs, maxLogs=10 2024-11-16T20:35:24,343 INFO [master/40c018648b21:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C42909%2C1731789323988.1731789324343 2024-11-16T20:35:24,349 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/WALs/40c018648b21,42909,1731789323988/40c018648b21%2C42909%2C1731789323988.1731789324343 2024-11-16T20:35:24,350 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43281:43281),(127.0.0.1/127.0.0.1:43693:43693)] 2024-11-16T20:35:24,351 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T20:35:24,351 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:35:24,351 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:24,351 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:24,353 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:24,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T20:35:24,354 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:24,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:35:24,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:24,357 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T20:35:24,357 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:24,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:35:24,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:24,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T20:35:24,361 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:24,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:35:24,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:24,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T20:35:24,365 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:24,365 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:35:24,366 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:24,367 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:24,367 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:24,369 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:24,369 DEBUG [master/40c018648b21:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:24,370 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T20:35:24,371 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:24,374 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:35:24,374 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=774594, jitterRate=-0.01505357027053833}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T20:35:24,375 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731789324351Initializing all the Stores at 1731789324352 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789324352Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789324352Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789324352Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789324353 (+1 ms)Cleaning up temporary data from old regions at 1731789324369 (+16 ms)Region opened successfully at 1731789324375 (+6 ms) 2024-11-16T20:35:24,375 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T20:35:24,379 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@136816ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=40c018648b21/172.17.0.2:0 2024-11-16T20:35:24,380 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T20:35:24,380 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T20:35:24,380 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T20:35:24,381 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T20:35:24,381 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T20:35:24,382 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T20:35:24,382 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T20:35:24,384 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T20:35:24,385 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T20:35:24,393 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T20:35:24,393 INFO [master/40c018648b21:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T20:35:24,394 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T20:35:24,403 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T20:35:24,404 INFO [master/40c018648b21:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T20:35:24,405 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T20:35:24,414 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T20:35:24,415 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T20:35:24,424 DEBUG 
[master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T20:35:24,429 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T20:35:24,442 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T20:35:24,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T20:35:24,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T20:35:24,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:24,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:24,458 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=40c018648b21,42909,1731789323988, sessionid=0x101455bfa6d0000, setting cluster-up flag (Was=false) 2024-11-16T20:35:24,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:24,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:24,509 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T20:35:24,510 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=40c018648b21,42909,1731789323988 2024-11-16T20:35:24,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:24,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:24,562 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T20:35:24,566 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=40c018648b21,42909,1731789323988 2024-11-16T20:35:24,569 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T20:35:24,574 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T20:35:24,575 INFO [master/40c018648b21:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T20:35:24,575 INFO [master/40c018648b21:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T20:35:24,576 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 40c018648b21,42909,1731789323988 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T20:35:24,579 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:35:24,579 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:35:24,579 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:35:24,580 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:35:24,580 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/40c018648b21:0, corePoolSize=10, maxPoolSize=10 2024-11-16T20:35:24,580 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:24,580 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/40c018648b21:0, corePoolSize=2, maxPoolSize=2 2024-11-16T20:35:24,580 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/40c018648b21:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T20:35:24,581 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731789354581 2024-11-16T20:35:24,581 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T20:35:24,581 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T20:35:24,581 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T20:35:24,582 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T20:35:24,582 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T20:35:24,582 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T20:35:24,582 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:24,582 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T20:35:24,582 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:35:24,582 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T20:35:24,582 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T20:35:24,583 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T20:35:24,583 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T20:35:24,583 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T20:35:24,583 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789324583,5,FailOnTimeoutGroup] 2024-11-16T20:35:24,583 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789324583,5,FailOnTimeoutGroup] 2024-11-16T20:35:24,584 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:24,584 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T20:35:24,584 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:24,584 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:24,584 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:24,584 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T20:35:24,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32989 is added to blk_1073741831_1007 (size=1321) 2024-11-16T20:35:24,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741831_1007 (size=1321) 2024-11-16T20:35:24,593 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T20:35:24,593 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8 2024-11-16T20:35:24,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32989 is added to blk_1073741832_1008 (size=32) 2024-11-16T20:35:24,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741832_1008 (size=32) 2024-11-16T20:35:24,606 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:35:24,607 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T20:35:24,609 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T20:35:24,609 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:24,610 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:35:24,610 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T20:35:24,612 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T20:35:24,612 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:24,612 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:35:24,613 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T20:35:24,614 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T20:35:24,615 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:24,615 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:35:24,615 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T20:35:24,617 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T20:35:24,617 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:24,618 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:35:24,618 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T20:35:24,618 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/data/hbase/meta/1588230740 2024-11-16T20:35:24,619 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/data/hbase/meta/1588230740 2024-11-16T20:35:24,620 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T20:35:24,620 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T20:35:24,620 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
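
[Editor's note] The descriptor printed above for hbase:meta lists four column families (info, ns, rep_barrier, table) with ROWCOL bloom filters, ROW_INDEX_V1 block encoding, in-memory block caching and small block sizes. The same attributes can be expressed through the public HBase client API; below is a minimal sketch for an ordinary user table (the table name and the single family are placeholders, since hbase:meta itself is created by the master, not by clients):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static void main(String[] args) {
    // One column family mirroring the 'info' family attributes in the log:
    // 3 versions, ROWCOL bloom filter, ROW_INDEX_V1 encoding, in-memory, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();

    // "exampletable" is a placeholder name used for illustration only.
    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("exampletable"))
        .setColumnFamily(info)
        .build();

    System.out.println(table);
  }
}
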
2024-11-16T20:35:24,622 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T20:35:24,623 INFO [RS:0;40c018648b21:44015 {}] regionserver.HRegionServer(746): ClusterId : a41b6807-3a02-4e31-ab2b-fb85ae7c7682 2024-11-16T20:35:24,623 DEBUG [RS:0;40c018648b21:44015 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T20:35:24,625 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:35:24,625 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=748025, jitterRate=-0.048837706446647644}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T20:35:24,626 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731789324606Initializing all the Stores at 1731789324607 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789324607Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789324607Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789324607Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789324607Cleaning up temporary data from old regions at 1731789324620 (+13 ms)Region opened successfully at 1731789324626 (+6 ms) 2024-11-16T20:35:24,626 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T20:35:24,627 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T20:35:24,627 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T20:35:24,627 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T20:35:24,627 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T20:35:24,627 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-16T20:35:24,627 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731789324626Disabling compacts and flushes for region at 1731789324626Disabling writes for close at 1731789324627 (+1 ms)Writing region close event to WAL at 1731789324627Closed at 1731789324627 2024-11-16T20:35:24,629 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:35:24,629 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T20:35:24,629 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T20:35:24,631 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T20:35:24,632 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T20:35:24,636 DEBUG [RS:0;40c018648b21:44015 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T20:35:24,636 DEBUG [RS:0;40c018648b21:44015 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T20:35:24,646 DEBUG [RS:0;40c018648b21:44015 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T20:35:24,647 DEBUG [RS:0;40c018648b21:44015 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4eeaca7c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=40c018648b21/172.17.0.2:0 2024-11-16T20:35:24,658 DEBUG [RS:0;40c018648b21:44015 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;40c018648b21:44015 2024-11-16T20:35:24,658 INFO [RS:0;40c018648b21:44015 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T20:35:24,658 INFO [RS:0;40c018648b21:44015 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T20:35:24,658 DEBUG [RS:0;40c018648b21:44015 {}] regionserver.HRegionServer(832): About to register with Master. 
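
[Editor's note] The two "Found 0 recovered edits file(s)" entries above come from the region-open path scanning the region directory on HDFS for a recovered.edits subdirectory before replaying any WAL edits. A sketch of the same kind of lookup with the plain Hadoop FileSystem API, assuming the NameNode address from this log; the region path below is illustrative, not the exact test path:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RecoveredEditsListingSketch {
  public static void main(String[] args) throws Exception {
    // NameNode address copied from the log; the region path is a placeholder.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46027"), new Configuration());
    Path regionDir = new Path("/user/jenkins/test-data/example/data/hbase/meta/1588230740");
    Path recoveredEdits = new Path(regionDir, "recovered.edits");

    // List whatever edit files are waiting to be replayed, if the directory exists at all.
    if (fs.exists(recoveredEdits)) {
      for (FileStatus st : fs.listStatus(recoveredEdits)) {
        System.out.println(st.getPath() + " len=" + st.getLen());
      }
    }
    fs.close();
  }
}
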
2024-11-16T20:35:24,659 INFO [RS:0;40c018648b21:44015 {}] regionserver.HRegionServer(2659): reportForDuty to master=40c018648b21,42909,1731789323988 with port=44015, startcode=1731789324196 2024-11-16T20:35:24,659 DEBUG [RS:0;40c018648b21:44015 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T20:35:24,662 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46659, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T20:35:24,662 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42909 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 40c018648b21,44015,1731789324196 2024-11-16T20:35:24,662 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42909 {}] master.ServerManager(517): Registering regionserver=40c018648b21,44015,1731789324196 2024-11-16T20:35:24,665 DEBUG [RS:0;40c018648b21:44015 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8 2024-11-16T20:35:24,665 DEBUG [RS:0;40c018648b21:44015 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46027 2024-11-16T20:35:24,665 DEBUG [RS:0;40c018648b21:44015 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T20:35:24,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T20:35:24,678 DEBUG [RS:0;40c018648b21:44015 {}] zookeeper.ZKUtil(111): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/40c018648b21,44015,1731789324196 2024-11-16T20:35:24,678 WARN [RS:0;40c018648b21:44015 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T20:35:24,678 INFO [RS:0;40c018648b21:44015 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:35:24,678 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [40c018648b21,44015,1731789324196] 2024-11-16T20:35:24,678 DEBUG [RS:0;40c018648b21:44015 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/WALs/40c018648b21,44015,1731789324196 2024-11-16T20:35:24,682 INFO [RS:0;40c018648b21:44015 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T20:35:24,686 INFO [RS:0;40c018648b21:44015 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T20:35:24,687 INFO [RS:0;40c018648b21:44015 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T20:35:24,687 INFO [RS:0;40c018648b21:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
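
[Editor's note] The regionserver's registration is visible above as an ephemeral child appearing under /hbase/rs, which the master's RegionServerTracker picks up from a children watch ("RegionServer ephemeral node created, adding [...]"). The underlying pattern can be reproduced with the plain ZooKeeper client; a sketch under the assumption of an open, unauthenticated ensemble, with the member name and paths purely illustrative:

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralRegistrationSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:52732", 30_000, event -> {});

    // A server advertises itself with an ephemeral znode; the node disappears
    // automatically when the session dies, which is what lets a tracker on the
    // other side notice crashed members.
    String member = "/hbase/rs/example-host,44015,0";   // illustrative member name
    zk.create(member, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // Another process watches the parent and re-lists children whenever a
    // NodeChildrenChanged event arrives, as the tracker does in the log.
    zk.getChildren("/hbase/rs", event ->
        System.out.println("membership changed: " + event.getPath()));

    Thread.sleep(10_000);   // keep the session (and the ephemeral node) alive briefly
    zk.close();
  }
}
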
2024-11-16T20:35:24,687 INFO [RS:0;40c018648b21:44015 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T20:35:24,688 INFO [RS:0;40c018648b21:44015 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T20:35:24,689 INFO [RS:0;40c018648b21:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:24,689 DEBUG [RS:0;40c018648b21:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:24,689 DEBUG [RS:0;40c018648b21:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:24,689 DEBUG [RS:0;40c018648b21:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:24,689 DEBUG [RS:0;40c018648b21:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:24,689 DEBUG [RS:0;40c018648b21:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:24,689 DEBUG [RS:0;40c018648b21:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/40c018648b21:0, corePoolSize=2, maxPoolSize=2 2024-11-16T20:35:24,689 DEBUG [RS:0;40c018648b21:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:24,689 DEBUG [RS:0;40c018648b21:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:24,689 DEBUG [RS:0;40c018648b21:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:24,690 DEBUG [RS:0;40c018648b21:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:24,690 DEBUG [RS:0;40c018648b21:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:24,690 DEBUG [RS:0;40c018648b21:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:24,690 DEBUG [RS:0;40c018648b21:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/40c018648b21:0, corePoolSize=3, maxPoolSize=3 2024-11-16T20:35:24,690 DEBUG [RS:0;40c018648b21:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0, corePoolSize=3, maxPoolSize=3 2024-11-16T20:35:24,691 INFO [RS:0;40c018648b21:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
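
[Editor's note] The executor.ExecutorService entries above describe per-purpose thread pools sized by corePoolSize/maxPoolSize. In shape this matches a fixed-size java.util.concurrent pool with a work queue in front of it; a minimal sketch with the sizes copied from the corePoolSize=1, maxPoolSize=1 pools and everything else illustrative:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedExecutorSketch {
  public static void main(String[] args) {
    // One worker thread; additional submissions wait in the queue until it is free.
    ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
        1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());

    openRegionPool.execute(() -> System.out.println("handling one event"));

    openRegionPool.shutdown();
  }
}
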
2024-11-16T20:35:24,691 INFO [RS:0;40c018648b21:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:24,691 INFO [RS:0;40c018648b21:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:24,691 INFO [RS:0;40c018648b21:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:24,691 INFO [RS:0;40c018648b21:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:24,691 INFO [RS:0;40c018648b21:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,44015,1731789324196-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T20:35:24,706 INFO [RS:0;40c018648b21:44015 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T20:35:24,707 INFO [RS:0;40c018648b21:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,44015,1731789324196-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:24,707 INFO [RS:0;40c018648b21:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:24,707 INFO [RS:0;40c018648b21:44015 {}] regionserver.Replication(171): 40c018648b21,44015,1731789324196 started 2024-11-16T20:35:24,723 INFO [RS:0;40c018648b21:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:24,723 INFO [RS:0;40c018648b21:44015 {}] regionserver.HRegionServer(1482): Serving as 40c018648b21,44015,1731789324196, RpcServer on 40c018648b21/172.17.0.2:44015, sessionid=0x101455bfa6d0001 2024-11-16T20:35:24,723 DEBUG [RS:0;40c018648b21:44015 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T20:35:24,723 DEBUG [RS:0;40c018648b21:44015 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 40c018648b21,44015,1731789324196 2024-11-16T20:35:24,723 DEBUG [RS:0;40c018648b21:44015 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '40c018648b21,44015,1731789324196' 2024-11-16T20:35:24,724 DEBUG [RS:0;40c018648b21:44015 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T20:35:24,724 DEBUG [RS:0;40c018648b21:44015 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T20:35:24,725 DEBUG [RS:0;40c018648b21:44015 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T20:35:24,725 DEBUG [RS:0;40c018648b21:44015 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T20:35:24,725 DEBUG [RS:0;40c018648b21:44015 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 40c018648b21,44015,1731789324196 2024-11-16T20:35:24,725 DEBUG [RS:0;40c018648b21:44015 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '40c018648b21,44015,1731789324196' 2024-11-16T20:35:24,725 DEBUG [RS:0;40c018648b21:44015 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T20:35:24,726 DEBUG 
[RS:0;40c018648b21:44015 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T20:35:24,726 DEBUG [RS:0;40c018648b21:44015 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T20:35:24,726 INFO [RS:0;40c018648b21:44015 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T20:35:24,726 INFO [RS:0;40c018648b21:44015 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T20:35:24,783 WARN [40c018648b21:42909 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-16T20:35:24,829 INFO [RS:0;40c018648b21:44015 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C44015%2C1731789324196, suffix=, logDir=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/WALs/40c018648b21,44015,1731789324196, archiveDir=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/oldWALs, maxLogs=32 2024-11-16T20:35:24,832 INFO [RS:0;40c018648b21:44015 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C44015%2C1731789324196.1731789324832 2024-11-16T20:35:24,842 INFO [RS:0;40c018648b21:44015 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/WALs/40c018648b21,44015,1731789324196/40c018648b21%2C44015%2C1731789324196.1731789324832 2024-11-16T20:35:24,843 DEBUG [RS:0;40c018648b21:44015 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43693:43693),(127.0.0.1/127.0.0.1:43281:43281)] 2024-11-16T20:35:25,033 DEBUG [40c018648b21:42909 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T20:35:25,035 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=40c018648b21,44015,1731789324196 2024-11-16T20:35:25,040 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 40c018648b21,44015,1731789324196, state=OPENING 2024-11-16T20:35:25,052 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T20:35:25,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:25,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:25,138 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T20:35:25,138 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:35:25,138 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:35:25,138 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=40c018648b21,44015,1731789324196}] 2024-11-16T20:35:25,143 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T20:35:25,144 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T20:35:25,145 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-16T20:35:25,292 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T20:35:25,295 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35501, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T20:35:25,299 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T20:35:25,300 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:35:25,302 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C44015%2C1731789324196.meta, suffix=.meta, logDir=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/WALs/40c018648b21,44015,1731789324196, archiveDir=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/oldWALs, maxLogs=32 2024-11-16T20:35:25,305 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C44015%2C1731789324196.meta.1731789325305.meta 2024-11-16T20:35:25,312 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/WALs/40c018648b21,44015,1731789324196/40c018648b21%2C44015%2C1731789324196.meta.1731789325305.meta 2024-11-16T20:35:25,315 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43281:43281),(127.0.0.1/127.0.0.1:43693:43693)] 2024-11-16T20:35:25,318 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T20:35:25,319 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T20:35:25,319 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered 
coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T20:35:25,319 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-16T20:35:25,319 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T20:35:25,319 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:35:25,319 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T20:35:25,319 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T20:35:25,322 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T20:35:25,323 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T20:35:25,324 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:25,324 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:35:25,324 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T20:35:25,326 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T20:35:25,326 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:25,326 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:35:25,326 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T20:35:25,327 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T20:35:25,328 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:25,328 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:35:25,328 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T20:35:25,329 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T20:35:25,329 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:25,330 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:35:25,330 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T20:35:25,331 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/data/hbase/meta/1588230740 2024-11-16T20:35:25,333 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/data/hbase/meta/1588230740 2024-11-16T20:35:25,334 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T20:35:25,334 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T20:35:25,334 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T20:35:25,336 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T20:35:25,337 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=807046, jitterRate=0.026213020086288452}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T20:35:25,337 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T20:35:25,338 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731789325320Writing region info on filesystem at 1731789325320Initializing all the Stores at 1731789325321 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789325321Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'} at 1731789325322 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789325322Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789325322Cleaning up temporary data from old regions at 1731789325334 (+12 ms)Running coprocessor post-open hooks at 1731789325337 (+3 ms)Region opened successfully at 1731789325338 (+1 ms) 2024-11-16T20:35:25,339 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731789325292 2024-11-16T20:35:25,342 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T20:35:25,342 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T20:35:25,344 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=40c018648b21,44015,1731789324196 2024-11-16T20:35:25,345 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 40c018648b21,44015,1731789324196, state=OPEN 2024-11-16T20:35:25,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T20:35:25,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T20:35:25,440 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:35:25,440 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:35:25,440 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=40c018648b21,44015,1731789324196 2024-11-16T20:35:25,444 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T20:35:25,445 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=40c018648b21,44015,1731789324196 in 302 msec 2024-11-16T20:35:25,449 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=2, resume processing ppid=1 2024-11-16T20:35:25,449 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 815 msec 2024-11-16T20:35:25,450 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:35:25,450 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T20:35:25,451 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T20:35:25,452 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=40c018648b21,44015,1731789324196, seqNum=-1] 2024-11-16T20:35:25,452 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T20:35:25,454 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46775, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T20:35:25,461 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 888 msec 2024-11-16T20:35:25,461 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731789325461, completionTime=-1 2024-11-16T20:35:25,461 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T20:35:25,462 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T20:35:25,463 INFO [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T20:35:25,463 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731789385463 2024-11-16T20:35:25,464 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731789445463 2024-11-16T20:35:25,464 INFO [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-16T20:35:25,464 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,42909,1731789323988-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:25,464 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,42909,1731789323988-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 
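
[Editor's note] The "Start fetching meta region location from registry" / "The fetched meta region location is [region=hbase:meta,,1.1588230740, ...]" pair above is the client-side counterpart of the meta assignment that has just completed. From application code the equivalent lookup goes through the public client API; a sketch assuming the quorum address from this log and default client settings otherwise:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Quorum copied from the log; in a real client this normally comes from hbase-site.xml.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "52732");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Resolves which regionserver currently hosts hbase:meta, the same answer
      // the log prints as "hostname=..., seqNum=...".
      HRegionLocation loc = locator.getRegionLocation(new byte[0]);
      System.out.println(loc.getRegion().getRegionNameAsString()
          + " on " + loc.getServerName());
    }
  }
}
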
2024-11-16T20:35:25,464 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,42909,1731789323988-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:25,464 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-40c018648b21:42909, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:25,464 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:25,464 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:25,466 DEBUG [master/40c018648b21:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T20:35:25,469 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.202sec 2024-11-16T20:35:25,469 INFO [master/40c018648b21:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T20:35:25,469 INFO [master/40c018648b21:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T20:35:25,469 INFO [master/40c018648b21:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T20:35:25,469 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T20:35:25,469 INFO [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T20:35:25,469 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,42909,1731789323988-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T20:35:25,469 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,42909,1731789323988-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T20:35:25,472 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T20:35:25,472 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T20:35:25,472 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,42909,1731789323988-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T20:35:25,523 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57927b81, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:35:25,524 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 40c018648b21,42909,-1 for getting cluster id 2024-11-16T20:35:25,524 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T20:35:25,526 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a41b6807-3a02-4e31-ab2b-fb85ae7c7682' 2024-11-16T20:35:25,527 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T20:35:25,527 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a41b6807-3a02-4e31-ab2b-fb85ae7c7682" 2024-11-16T20:35:25,527 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56abadfd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:35:25,527 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [40c018648b21,42909,-1] 2024-11-16T20:35:25,527 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T20:35:25,528 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:35:25,530 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47832, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T20:35:25,531 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6aa3a1e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:35:25,532 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T20:35:25,533 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=40c018648b21,44015,1731789324196, seqNum=-1] 2024-11-16T20:35:25,534 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T20:35:25,535 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43604, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T20:35:25,538 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=40c018648b21,42909,1731789323988 2024-11-16T20:35:25,539 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:25,542 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T20:35:25,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T20:35:25,543 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T20:35:25,543 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:35:25,543 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:35:25,543 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:35:25,544 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T20:35:25,544 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1676223287, stopped=false 2024-11-16T20:35:25,544 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T20:35:25,544 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=40c018648b21,42909,1731789323988 2024-11-16T20:35:25,596 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:35:25,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:35:25,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T20:35:25,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T20:35:25,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:25,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:25,632 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T20:35:25,632 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T20:35:25,632 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:35:25,632 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:35:25,633 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:35:25,633 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:35:25,633 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '40c018648b21,44015,1731789324196' ***** 2024-11-16T20:35:25,633 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T20:35:25,633 INFO [RS:0;40c018648b21:44015 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T20:35:25,633 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T20:35:25,633 INFO [RS:0;40c018648b21:44015 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T20:35:25,633 INFO [RS:0;40c018648b21:44015 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T20:35:25,634 INFO [RS:0;40c018648b21:44015 {}] regionserver.HRegionServer(959): stopping server 40c018648b21,44015,1731789324196 2024-11-16T20:35:25,634 INFO [RS:0;40c018648b21:44015 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T20:35:25,634 INFO [RS:0;40c018648b21:44015 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;40c018648b21:44015. 2024-11-16T20:35:25,634 DEBUG [RS:0;40c018648b21:44015 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:35:25,634 DEBUG [RS:0;40c018648b21:44015 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:35:25,634 INFO [RS:0;40c018648b21:44015 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-16T20:35:25,634 INFO [RS:0;40c018648b21:44015 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T20:35:25,634 INFO [RS:0;40c018648b21:44015 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T20:35:25,634 INFO [RS:0;40c018648b21:44015 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T20:35:25,635 INFO [RS:0;40c018648b21:44015 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-16T20:35:25,635 DEBUG [RS:0;40c018648b21:44015 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-16T20:35:25,635 DEBUG [RS:0;40c018648b21:44015 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T20:35:25,635 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T20:35:25,635 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T20:35:25,635 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T20:35:25,635 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T20:35:25,635 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T20:35:25,635 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-16T20:35:25,654 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/data/hbase/meta/1588230740/.tmp/ns/01ea6afa3d644c9d95adc0c87c2e771d is 43, key is default/ns:d/1731789325454/Put/seqid=0 2024-11-16T20:35:25,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32989 is added to blk_1073741835_1011 (size=5153) 2024-11-16T20:35:25,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741835_1011 (size=5153) 2024-11-16T20:35:25,660 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/data/hbase/meta/1588230740/.tmp/ns/01ea6afa3d644c9d95adc0c87c2e771d 2024-11-16T20:35:25,667 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/data/hbase/meta/1588230740/.tmp/ns/01ea6afa3d644c9d95adc0c87c2e771d as hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/data/hbase/meta/1588230740/ns/01ea6afa3d644c9d95adc0c87c2e771d 2024-11-16T20:35:25,676 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/data/hbase/meta/1588230740/ns/01ea6afa3d644c9d95adc0c87c2e771d, entries=2, sequenceid=6, filesize=5.0 K 2024-11-16T20:35:25,677 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false 2024-11-16T20:35:25,677 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T20:35:25,689 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T20:35:25,690 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T20:35:25,690 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T20:35:25,691 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731789325635Running coprocessor pre-close hooks at 1731789325635Disabling compacts and flushes for region at 1731789325635Disabling writes for close at 1731789325635Obtaining lock to block concurrent updates at 1731789325636 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731789325636Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731789325636Flushing stores of hbase:meta,,1.1588230740 at 1731789325637 (+1 ms)Flushing 1588230740/ns: creating writer at 1731789325637Flushing 1588230740/ns: appending metadata at 1731789325654 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731789325654Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e47d7fb: reopening flushed file at 1731789325666 (+12 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false at 1731789325677 (+11 ms)Writing region close event to WAL at 1731789325684 (+7 ms)Running coprocessor post-close hooks at 1731789325690 (+6 ms)Closed at 1731789325690 2024-11-16T20:35:25,691 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T20:35:25,693 INFO [regionserver/40c018648b21:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T20:35:25,693 INFO [regionserver/40c018648b21:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T20:35:25,835 INFO [RS:0;40c018648b21:44015 {}] regionserver.HRegionServer(976): stopping server 40c018648b21,44015,1731789324196; all regions closed. 
2024-11-16T20:35:25,836 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:25,836 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:25,837 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:25,837 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:25,837 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:25,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741834_1010 (size=1152) 2024-11-16T20:35:25,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32989 is added to blk_1073741834_1010 (size=1152) 2024-11-16T20:35:25,848 DEBUG [RS:0;40c018648b21:44015 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/oldWALs 2024-11-16T20:35:25,848 INFO [RS:0;40c018648b21:44015 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 40c018648b21%2C44015%2C1731789324196.meta:.meta(num 1731789325305) 2024-11-16T20:35:25,849 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:25,849 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:25,849 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:25,849 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:25,849 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:25,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32989 is added to blk_1073741833_1009 (size=93) 2024-11-16T20:35:25,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741833_1009 (size=93) 2024-11-16T20:35:25,855 DEBUG [RS:0;40c018648b21:44015 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/oldWALs 2024-11-16T20:35:25,855 INFO [RS:0;40c018648b21:44015 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 40c018648b21%2C44015%2C1731789324196:(num 1731789324832) 2024-11-16T20:35:25,855 DEBUG [RS:0;40c018648b21:44015 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:35:25,855 INFO [RS:0;40c018648b21:44015 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T20:35:25,855 INFO [RS:0;40c018648b21:44015 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T20:35:25,855 INFO [RS:0;40c018648b21:44015 {}] hbase.ChoreService(370): Chore service for: regionserver/40c018648b21:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T20:35:25,855 INFO [RS:0;40c018648b21:44015 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T20:35:25,855 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T20:35:25,855 INFO [RS:0;40c018648b21:44015 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44015 2024-11-16T20:35:25,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T20:35:25,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/40c018648b21,44015,1731789324196 2024-11-16T20:35:25,895 INFO [RS:0;40c018648b21:44015 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T20:35:25,979 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [40c018648b21,44015,1731789324196] 2024-11-16T20:35:25,991 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/40c018648b21,44015,1731789324196 already deleted, retry=false 2024-11-16T20:35:25,992 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 40c018648b21,44015,1731789324196 expired; onlineServers=0 2024-11-16T20:35:25,992 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '40c018648b21,42909,1731789323988' ***** 2024-11-16T20:35:25,992 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T20:35:25,992 INFO [M:0;40c018648b21:42909 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T20:35:25,992 INFO [M:0;40c018648b21:42909 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T20:35:25,993 DEBUG [M:0;40c018648b21:42909 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T20:35:25,993 DEBUG [M:0;40c018648b21:42909 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T20:35:25,993 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T20:35:25,993 DEBUG [master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789324583 {}] cleaner.HFileCleaner(306): Exit Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789324583,5,FailOnTimeoutGroup] 2024-11-16T20:35:25,993 DEBUG [master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789324583 {}] cleaner.HFileCleaner(306): Exit Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789324583,5,FailOnTimeoutGroup] 2024-11-16T20:35:25,994 INFO [M:0;40c018648b21:42909 {}] hbase.ChoreService(370): Chore service for: master/40c018648b21:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T20:35:25,994 INFO [M:0;40c018648b21:42909 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T20:35:25,994 DEBUG [M:0;40c018648b21:42909 {}] master.HMaster(1795): Stopping service threads 2024-11-16T20:35:25,994 INFO [M:0;40c018648b21:42909 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T20:35:25,994 INFO [M:0;40c018648b21:42909 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T20:35:25,995 INFO [M:0;40c018648b21:42909 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T20:35:25,995 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T20:35:26,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T20:35:26,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:26,003 DEBUG [M:0;40c018648b21:42909 {}] zookeeper.ZKUtil(347): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T20:35:26,004 WARN [M:0;40c018648b21:42909 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T20:35:26,004 INFO [M:0;40c018648b21:42909 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/.lastflushedseqids 2024-11-16T20:35:26,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741836_1012 (size=99) 2024-11-16T20:35:26,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32989 is added to blk_1073741836_1012 (size=99) 2024-11-16T20:35:26,014 INFO [M:0;40c018648b21:42909 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T20:35:26,014 INFO [M:0;40c018648b21:42909 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T20:35:26,015 DEBUG [M:0;40c018648b21:42909 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T20:35:26,015 INFO [M:0;40c018648b21:42909 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:35:26,015 DEBUG [M:0;40c018648b21:42909 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:35:26,015 DEBUG [M:0;40c018648b21:42909 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T20:35:26,015 DEBUG [M:0;40c018648b21:42909 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:35:26,015 INFO [M:0;40c018648b21:42909 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-16T20:35:26,035 DEBUG [M:0;40c018648b21:42909 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d644a2c27f5c4ffbb6aae65885ec78fc is 82, key is hbase:meta,,1/info:regioninfo/1731789325343/Put/seqid=0 2024-11-16T20:35:26,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741837_1013 (size=5672) 2024-11-16T20:35:26,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32989 is added to blk_1073741837_1013 (size=5672) 2024-11-16T20:35:26,041 INFO [M:0;40c018648b21:42909 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d644a2c27f5c4ffbb6aae65885ec78fc 2024-11-16T20:35:26,062 DEBUG [M:0;40c018648b21:42909 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a6bd035295724d9280bca925353d37ad is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731789325460/Put/seqid=0 2024-11-16T20:35:26,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741838_1014 (size=5275) 2024-11-16T20:35:26,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32989 is added to blk_1073741838_1014 (size=5275) 2024-11-16T20:35:26,068 INFO [M:0;40c018648b21:42909 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a6bd035295724d9280bca925353d37ad 2024-11-16T20:35:26,079 INFO [RS:0;40c018648b21:44015 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T20:35:26,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:35:26,079 INFO [RS:0;40c018648b21:44015 {}] regionserver.HRegionServer(1031): Exiting; stopping=40c018648b21,44015,1731789324196; zookeeper connection closed. 
2024-11-16T20:35:26,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x101455bfa6d0001, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:35:26,079 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3261a6ac {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3261a6ac 2024-11-16T20:35:26,079 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T20:35:26,088 DEBUG [M:0;40c018648b21:42909 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d53fddd5348f47e299e2847e495fe681 is 69, key is 40c018648b21,44015,1731789324196/rs:state/1731789324663/Put/seqid=0 2024-11-16T20:35:26,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741839_1015 (size=5156) 2024-11-16T20:35:26,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32989 is added to blk_1073741839_1015 (size=5156) 2024-11-16T20:35:26,093 INFO [M:0;40c018648b21:42909 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d53fddd5348f47e299e2847e495fe681 2024-11-16T20:35:26,115 DEBUG [M:0;40c018648b21:42909 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f55928af0d694437804e708ed9cc2f67 is 52, key is load_balancer_on/state:d/1731789325541/Put/seqid=0 2024-11-16T20:35:26,116 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T20:35:26,119 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:35:26,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32989 is added to blk_1073741840_1016 (size=5056) 2024-11-16T20:35:26,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741840_1016 (size=5056) 2024-11-16T20:35:26,121 INFO [M:0;40c018648b21:42909 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f55928af0d694437804e708ed9cc2f67 2024-11-16T20:35:26,128 DEBUG [M:0;40c018648b21:42909 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d644a2c27f5c4ffbb6aae65885ec78fc as hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d644a2c27f5c4ffbb6aae65885ec78fc 2024-11-16T20:35:26,135 INFO [M:0;40c018648b21:42909 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d644a2c27f5c4ffbb6aae65885ec78fc, entries=8, sequenceid=29, filesize=5.5 K 2024-11-16T20:35:26,136 DEBUG [M:0;40c018648b21:42909 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a6bd035295724d9280bca925353d37ad as hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a6bd035295724d9280bca925353d37ad 2024-11-16T20:35:26,140 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:35:26,143 INFO [M:0;40c018648b21:42909 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a6bd035295724d9280bca925353d37ad, entries=3, sequenceid=29, filesize=5.2 K 2024-11-16T20:35:26,145 DEBUG [M:0;40c018648b21:42909 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d53fddd5348f47e299e2847e495fe681 as hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d53fddd5348f47e299e2847e495fe681 2024-11-16T20:35:26,151 INFO [M:0;40c018648b21:42909 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d53fddd5348f47e299e2847e495fe681, entries=1, sequenceid=29, filesize=5.0 K 2024-11-16T20:35:26,153 DEBUG [M:0;40c018648b21:42909 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f55928af0d694437804e708ed9cc2f67 as hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f55928af0d694437804e708ed9cc2f67 2024-11-16T20:35:26,160 INFO [M:0;40c018648b21:42909 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/acc813a2-5dbd-888d-2ad5-208b3edfabe8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f55928af0d694437804e708ed9cc2f67, entries=1, sequenceid=29, filesize=4.9 K 2024-11-16T20:35:26,161 INFO [M:0;40c018648b21:42909 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=29, compaction requested=false 2024-11-16T20:35:26,163 INFO [M:0;40c018648b21:42909 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:35:26,163 DEBUG [M:0;40c018648b21:42909 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731789326015Disabling compacts and flushes for region at 1731789326015Disabling writes for close at 1731789326015Obtaining lock to block concurrent updates at 1731789326015Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731789326015Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731789326016 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731789326017 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731789326017Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731789326034 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731789326034Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731789326046 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731789326062 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731789326062Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731789326073 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731789326087 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731789326087Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731789326098 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731789326114 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731789326114Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5bd90874: reopening flushed file at 1731789326127 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9ed115a: reopening flushed file at 1731789326135 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a3d6897: reopening flushed file at 1731789326143 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7da8e9a6: reopening flushed file at 1731789326152 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=29, compaction requested=false at 1731789326161 (+9 ms)Writing region close event to WAL at 1731789326163 (+2 ms)Closed at 1731789326163 2024-11-16T20:35:26,164 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:26,164 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:26,164 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:26,164 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:26,164 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:26,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35369 is added to blk_1073741830_1006 (size=10311) 2024-11-16T20:35:26,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32989 is added to blk_1073741830_1006 (size=10311) 2024-11-16T20:35:26,168 INFO [M:0;40c018648b21:42909 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T20:35:26,168 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T20:35:26,168 INFO [M:0;40c018648b21:42909 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42909 2024-11-16T20:35:26,168 INFO [M:0;40c018648b21:42909 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T20:35:26,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:35:26,293 INFO [M:0;40c018648b21:42909 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T20:35:26,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42909-0x101455bfa6d0000, quorum=127.0.0.1:52732, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:35:26,297 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f5c23ef{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:35:26,297 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@716c7b87{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:35:26,297 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:35:26,297 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bc8c098{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:35:26,297 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b915b67{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/hadoop.log.dir/,STOPPED} 2024-11-16T20:35:26,298 WARN [BP-1039840100-172.17.0.2-1731789321665 heartbeating to localhost/127.0.0.1:46027 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:35:26,298 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T20:35:26,298 WARN [BP-1039840100-172.17.0.2-1731789321665 heartbeating to localhost/127.0.0.1:46027 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1039840100-172.17.0.2-1731789321665 (Datanode Uuid 734540b4-b0d9-49ab-a452-596c3799a21d) service to localhost/127.0.0.1:46027 2024-11-16T20:35:26,298 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:35:26,299 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/cluster_c4982f83-97c5-5372-4e5b-bd31ac7c1668/data/data3/current/BP-1039840100-172.17.0.2-1731789321665 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:35:26,299 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/cluster_c4982f83-97c5-5372-4e5b-bd31ac7c1668/data/data4/current/BP-1039840100-172.17.0.2-1731789321665 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:35:26,299 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:35:26,301 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52b07bdb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:35:26,302 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70121b28{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:35:26,302 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:35:26,302 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3aa9354f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:35:26,302 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c8914e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/hadoop.log.dir/,STOPPED} 2024-11-16T20:35:26,303 WARN [BP-1039840100-172.17.0.2-1731789321665 heartbeating to localhost/127.0.0.1:46027 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:35:26,303 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T20:35:26,303 WARN [BP-1039840100-172.17.0.2-1731789321665 heartbeating to localhost/127.0.0.1:46027 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1039840100-172.17.0.2-1731789321665 (Datanode Uuid b6d9c285-aa1b-4279-8558-73ae5dbfe6fe) service to localhost/127.0.0.1:46027 2024-11-16T20:35:26,303 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:35:26,304 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/cluster_c4982f83-97c5-5372-4e5b-bd31ac7c1668/data/data1/current/BP-1039840100-172.17.0.2-1731789321665 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:35:26,304 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/cluster_c4982f83-97c5-5372-4e5b-bd31ac7c1668/data/data2/current/BP-1039840100-172.17.0.2-1731789321665 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:35:26,304 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:35:26,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@493d1d34{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T20:35:26,311 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6a249094{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:35:26,311 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:35:26,311 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75cbfab9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:35:26,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65506a11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/hadoop.log.dir/,STOPPED} 2024-11-16T20:35:26,318 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T20:35:26,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T20:35:26,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T20:35:26,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/hadoop.log.dir so I do NOT create it in target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b 2024-11-16T20:35:26,334 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e13aac79-29ff-a2e1-2fbf-5424728eb35f/hadoop.tmp.dir so I do NOT create it in target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b 2024-11-16T20:35:26,334 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64, deleteOnExit=true 2024-11-16T20:35:26,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T20:35:26,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/test.cache.data in system properties and HBase conf 2024-11-16T20:35:26,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T20:35:26,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.log.dir in system properties and HBase conf 2024-11-16T20:35:26,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T20:35:26,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T20:35:26,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T20:35:26,335 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T20:35:26,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T20:35:26,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T20:35:26,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T20:35:26,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T20:35:26,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T20:35:26,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T20:35:26,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T20:35:26,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T20:35:26,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T20:35:26,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/nfs.dump.dir in system properties and HBase conf 2024-11-16T20:35:26,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/java.io.tmpdir in system properties and HBase conf 2024-11-16T20:35:26,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T20:35:26,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T20:35:26,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T20:35:26,351 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T20:35:26,691 INFO [regionserver/40c018648b21:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T20:35:26,752 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:35:26,759 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:35:26,760 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:35:26,760 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:35:26,761 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:35:26,762 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:35:26,764 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1aa07d80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:35:26,764 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3150e6db{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:35:26,859 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2606b08f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/java.io.tmpdir/jetty-localhost-38363-hadoop-hdfs-3_4_1-tests_jar-_-any-5864340876115656433/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T20:35:26,860 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@c053989{HTTP/1.1, (http/1.1)}{localhost:38363} 2024-11-16T20:35:26,860 INFO [Time-limited test {}] server.Server(415): Started @108692ms 2024-11-16T20:35:26,872 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T20:35:27,112 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:35:27,116 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:35:27,119 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:35:27,119 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:35:27,119 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T20:35:27,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2cb9bebc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:35:27,120 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bf32f74{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:35:27,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c77eea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/java.io.tmpdir/jetty-localhost-46247-hadoop-hdfs-3_4_1-tests_jar-_-any-12159271931497960979/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:35:27,214 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e20426d{HTTP/1.1, (http/1.1)}{localhost:46247} 2024-11-16T20:35:27,214 INFO [Time-limited test {}] server.Server(415): Started @109046ms 2024-11-16T20:35:27,215 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:35:27,242 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:35:27,245 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:35:27,246 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:35:27,246 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:35:27,246 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:35:27,247 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f7f19bf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:35:27,247 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32403ac6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:35:27,340 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6cd7b3e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/java.io.tmpdir/jetty-localhost-35999-hadoop-hdfs-3_4_1-tests_jar-_-any-11053653822397589438/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:35:27,340 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ff95875{HTTP/1.1, (http/1.1)}{localhost:35999} 2024-11-16T20:35:27,341 INFO [Time-limited test {}] server.Server(415): Started @109173ms 2024-11-16T20:35:27,342 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:35:28,419 WARN [Thread-672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data1/current/BP-1083962379-172.17.0.2-1731789326364/current, will proceed with Du for space computation calculation, 2024-11-16T20:35:28,419 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data2/current/BP-1083962379-172.17.0.2-1731789326364/current, will proceed with Du for space computation calculation, 2024-11-16T20:35:28,443 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T20:35:28,445 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x598c0b12845c63cc with lease ID 0x455d738420b546df: Processing first storage report for DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746 from datanode DatanodeRegistration(127.0.0.1:46125, datanodeUuid=3f2b0ad1-22e5-4f8e-adc2-0f54d4046b23, infoPort=37525, infoSecurePort=0, ipcPort=44965, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364) 2024-11-16T20:35:28,445 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x598c0b12845c63cc with lease ID 0x455d738420b546df: from storage DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746 node DatanodeRegistration(127.0.0.1:46125, datanodeUuid=3f2b0ad1-22e5-4f8e-adc2-0f54d4046b23, infoPort=37525, infoSecurePort=0, ipcPort=44965, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:35:28,445 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x598c0b12845c63cc with lease ID 0x455d738420b546df: Processing first storage report for DS-2ffe0b37-8c1a-42f9-872f-33304a9800a0 from datanode DatanodeRegistration(127.0.0.1:46125, datanodeUuid=3f2b0ad1-22e5-4f8e-adc2-0f54d4046b23, infoPort=37525, infoSecurePort=0, ipcPort=44965, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364) 2024-11-16T20:35:28,445 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x598c0b12845c63cc with lease ID 0x455d738420b546df: from storage DS-2ffe0b37-8c1a-42f9-872f-33304a9800a0 node DatanodeRegistration(127.0.0.1:46125, datanodeUuid=3f2b0ad1-22e5-4f8e-adc2-0f54d4046b23, infoPort=37525, infoSecurePort=0, ipcPort=44965, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:35:28,571 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data3/current/BP-1083962379-172.17.0.2-1731789326364/current, will proceed with Du for space computation calculation, 2024-11-16T20:35:28,571 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data4/current/BP-1083962379-172.17.0.2-1731789326364/current, will proceed with Du for space computation calculation, 2024-11-16T20:35:28,589 WARN [Thread-659 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T20:35:28,591 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb7f3cf0a38ccbc58 with lease ID 0x455d738420b546e0: Processing first storage report for DS-66984a25-bb75-45b5-9d89-6a192cfe26fd from datanode DatanodeRegistration(127.0.0.1:39211, datanodeUuid=f19a5063-ec0a-4807-a891-570d666b0316, infoPort=33289, infoSecurePort=0, ipcPort=34271, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364) 2024-11-16T20:35:28,592 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7f3cf0a38ccbc58 with lease ID 0x455d738420b546e0: from storage DS-66984a25-bb75-45b5-9d89-6a192cfe26fd node DatanodeRegistration(127.0.0.1:39211, datanodeUuid=f19a5063-ec0a-4807-a891-570d666b0316, infoPort=33289, infoSecurePort=0, ipcPort=34271, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:35:28,592 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb7f3cf0a38ccbc58 with lease ID 0x455d738420b546e0: Processing first storage report for DS-d3b67d13-672b-4d4c-b941-4a5bddb76fae from datanode DatanodeRegistration(127.0.0.1:39211, datanodeUuid=f19a5063-ec0a-4807-a891-570d666b0316, infoPort=33289, infoSecurePort=0, ipcPort=34271, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364) 2024-11-16T20:35:28,592 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7f3cf0a38ccbc58 with lease ID 0x455d738420b546e0: from storage DS-d3b67d13-672b-4d4c-b941-4a5bddb76fae node DatanodeRegistration(127.0.0.1:39211, datanodeUuid=f19a5063-ec0a-4807-a891-570d666b0316, infoPort=33289, infoSecurePort=0, ipcPort=34271, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:35:28,689 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b 2024-11-16T20:35:28,693 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/zookeeper_0, clientPort=50571, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T20:35:28,694 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50571 2024-11-16T20:35:28,694 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:28,695 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:28,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39211 is added to blk_1073741825_1001 (size=7) 2024-11-16T20:35:28,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46125 is added to blk_1073741825_1001 (size=7) 2024-11-16T20:35:28,706 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65 with version=8 2024-11-16T20:35:28,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/hbase-staging 2024-11-16T20:35:28,708 INFO [Time-limited test {}] client.ConnectionUtils(128): master/40c018648b21:0 server-side Connection retries=45 2024-11-16T20:35:28,708 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:35:28,708 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T20:35:28,708 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T20:35:28,709 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:35:28,709 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T20:35:28,709 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T20:35:28,709 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T20:35:28,709 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37849 2024-11-16T20:35:28,711 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37849 connecting to ZooKeeper ensemble=127.0.0.1:50571 2024-11-16T20:35:28,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:378490x0, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T20:35:28,770 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37849-0x101455c0ce00000 connected 2024-11-16T20:35:28,856 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:28,859 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:28,863 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:35:28,863 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65, hbase.cluster.distributed=false 2024-11-16T20:35:28,865 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T20:35:28,866 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37849 2024-11-16T20:35:28,866 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37849 2024-11-16T20:35:28,866 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37849 2024-11-16T20:35:28,866 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37849 2024-11-16T20:35:28,867 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37849 2024-11-16T20:35:28,882 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/40c018648b21:0 server-side Connection retries=45 2024-11-16T20:35:28,882 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:35:28,882 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T20:35:28,882 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T20:35:28,882 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:35:28,882 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T20:35:28,883 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T20:35:28,883 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T20:35:28,883 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46419 2024-11-16T20:35:28,885 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46419 connecting to ZooKeeper ensemble=127.0.0.1:50571 2024-11-16T20:35:28,885 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:28,887 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:28,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:464190x0, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T20:35:28,899 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:35:28,899 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46419-0x101455c0ce00001 connected 2024-11-16T20:35:28,899 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T20:35:28,900 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T20:35:28,900 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T20:35:28,901 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T20:35:28,903 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46419 2024-11-16T20:35:28,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46419 2024-11-16T20:35:28,909 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46419 2024-11-16T20:35:28,909 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46419 2024-11-16T20:35:28,909 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46419 2024-11-16T20:35:28,921 DEBUG [M:0;40c018648b21:37849 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;40c018648b21:37849 2024-11-16T20:35:28,921 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/40c018648b21,37849,1731789328708 2024-11-16T20:35:28,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:35:28,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:35:28,930 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/40c018648b21,37849,1731789328708 2024-11-16T20:35:28,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T20:35:28,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:28,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:28,941 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T20:35:28,941 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/40c018648b21,37849,1731789328708 from backup master directory 2024-11-16T20:35:28,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:35:28,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/40c018648b21,37849,1731789328708 2024-11-16T20:35:28,950 WARN [master/40c018648b21:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
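The ZKWatcher/ZKUtil entries above show the master and region server placing watches on znodes such as /hbase/master and /hbase/backup-masters before those nodes exist, then reacting to the NodeCreated/NodeDeleted events as the active-master election plays out. In terms of the raw ZooKeeper client API that ZKUtil wraps, "Set watcher on znode that does not yet exist" corresponds to an exists() call that returns null but still registers the watch. A hedged illustration (the connect string and znode path are taken from the log; the class and variable names are made up, and this is not HBase source):

  import org.apache.zookeeper.WatchedEvent;
  import org.apache.zookeeper.Watcher;
  import org.apache.zookeeper.ZooKeeper;
  import org.apache.zookeeper.data.Stat;

  public class MasterZNodeWatch {
    public static void main(String[] args) throws Exception {
      Watcher watcher = (WatchedEvent event) ->
          System.out.println("Event " + event.getType() + " on " + event.getPath());
      // Same mini-cluster ensemble the log reports: 127.0.0.1:50571.
      ZooKeeper zk = new ZooKeeper("127.0.0.1:50571", 30000, watcher);
      // exists() on a missing znode returns null but registers the watch, so a later
      // NodeCreated event fires for /hbase/master -- the pattern the ZKUtil(113) lines log.
      Stat stat = zk.exists("/hbase/master", true);
      System.out.println("/hbase/master currently " + (stat == null ? "absent" : "present"));
    }
  }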
2024-11-16T20:35:28,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:35:28,951 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=40c018648b21,37849,1731789328708 2024-11-16T20:35:28,955 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/hbase.id] with ID: 9fe809fb-a4f5-43c8-bad7-7b8b0a651d6b 2024-11-16T20:35:28,955 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/.tmp/hbase.id 2024-11-16T20:35:28,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39211 is added to blk_1073741826_1002 (size=42) 2024-11-16T20:35:28,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46125 is added to blk_1073741826_1002 (size=42) 2024-11-16T20:35:28,962 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/.tmp/hbase.id]:[hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/hbase.id] 2024-11-16T20:35:28,974 INFO [master/40c018648b21:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:28,974 INFO [master/40c018648b21:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T20:35:28,976 INFO [master/40c018648b21:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
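The FSUtils entries above create the cluster ID file by first writing hdfs://localhost:33297/.../.tmp/hbase.id and then moving it onto .../hbase.id, so a reader never sees a half-written ID. Expressed against the plain Hadoop FileSystem API, that write-then-rename pattern looks roughly like the sketch below (the namenode address, paths, and ID string are copied from the log; the real file stores a serialized ClusterId rather than plain text, and this is illustrative code, not the FSUtils implementation):

  import java.nio.charset.StandardCharsets;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class ClusterIdFileSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      conf.set("fs.defaultFS", "hdfs://localhost:33297"); // namenode from the log above
      FileSystem fs = FileSystem.get(conf);

      Path rootDir = new Path("/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65");
      Path tmpId = new Path(rootDir, ".tmp/hbase.id");
      Path id = new Path(rootDir, "hbase.id");

      // Write the ID to a temporary file first...
      try (FSDataOutputStream out = fs.create(tmpId, true)) {
        out.write("9fe809fb-a4f5-43c8-bad7-7b8b0a651d6b".getBytes(StandardCharsets.UTF_8));
      }
      // ...then move it into place, so only a complete hbase.id is ever visible.
      fs.rename(tmpId, id);
    }
  }

On HDFS the rename is a single namenode metadata operation, which is why this two-step dance gives readers an all-or-nothing view of the file.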
2024-11-16T20:35:28,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:28,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:29,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39211 is added to blk_1073741827_1003 (size=196) 2024-11-16T20:35:29,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46125 is added to blk_1073741827_1003 (size=196) 2024-11-16T20:35:29,011 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T20:35:29,012 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T20:35:29,012 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:35:29,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39211 is added to blk_1073741828_1004 (size=1189) 2024-11-16T20:35:29,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46125 is added to blk_1073741828_1004 (size=1189) 2024-11-16T20:35:29,024 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store 2024-11-16T20:35:29,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39211 is added to blk_1073741829_1005 (size=34) 2024-11-16T20:35:29,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46125 is added to blk_1073741829_1005 (size=34) 2024-11-16T20:35:29,033 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:35:29,033 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T20:35:29,033 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:35:29,033 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:35:29,034 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T20:35:29,034 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:35:29,034 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
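The MasterRegion lines above print the full descriptor of the internal 'master:store' table: an 'info' family with 3 versions, ROW_INDEX_V1 encoding, a ROWCOL bloom filter, in-memory caching, and 8 KB blocks, plus 'proc', 'rs', and 'state' families with default settings (1 version, ROW bloom, 64 KB blocks, no encoding). That region is assembled internally by MasterRegion rather than by user code, but as a hedged illustration the same settings map onto the public client-side builder API roughly like this (class name invented; values copied from the logged descriptor):

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
  import org.apache.hadoop.hbase.regionserver.BloomType;
  import org.apache.hadoop.hbase.util.Bytes;

  public class MasterStoreDescriptorSketch {
    public static TableDescriptor build() {
      return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
          // 'info': 3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks.
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(3)
              .setInMemory(true)
              .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
              .setBloomFilterType(BloomType.ROWCOL)
              .setBlocksize(8 * 1024)
              .build())
          // 'proc', 'rs', and 'state' use the defaults shown in the log:
          // 1 version, ROW bloom filter, 64 KB blocks, no data block encoding.
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
          .build();
    }
  }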
2024-11-16T20:35:29,034 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731789329033Disabling compacts and flushes for region at 1731789329033Disabling writes for close at 1731789329034 (+1 ms)Writing region close event to WAL at 1731789329034Closed at 1731789329034 2024-11-16T20:35:29,035 WARN [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/.initializing 2024-11-16T20:35:29,035 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/WALs/40c018648b21,37849,1731789328708 2024-11-16T20:35:29,038 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C37849%2C1731789328708, suffix=, logDir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/WALs/40c018648b21,37849,1731789328708, archiveDir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/oldWALs, maxLogs=10 2024-11-16T20:35:29,039 INFO [master/40c018648b21:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C37849%2C1731789328708.1731789329038 2024-11-16T20:35:29,045 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/WALs/40c018648b21,37849,1731789328708/40c018648b21%2C37849%2C1731789328708.1731789329038 2024-11-16T20:35:29,048 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33289:33289),(127.0.0.1/127.0.0.1:37525:37525)] 2024-11-16T20:35:29,049 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T20:35:29,049 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:35:29,049 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:29,049 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:29,051 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:29,053 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T20:35:29,053 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:29,053 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:35:29,054 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:29,055 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T20:35:29,055 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:29,056 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:35:29,056 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:29,058 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T20:35:29,058 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:29,058 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:35:29,058 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:29,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T20:35:29,060 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:29,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:35:29,061 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:29,062 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:29,062 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:29,064 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:29,064 DEBUG [master/40c018648b21:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:29,065 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T20:35:29,066 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:35:29,068 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:35:29,069 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=734959, jitterRate=-0.06545145809650421}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T20:35:29,070 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731789329050Initializing all the Stores at 1731789329051 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789329051Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789329051Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789329051Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789329051Cleaning up temporary data from old regions at 1731789329064 (+13 ms)Region opened successfully at 1731789329070 (+6 ms) 2024-11-16T20:35:29,070 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T20:35:29,074 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5dda7a2b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=40c018648b21/172.17.0.2:0 2024-11-16T20:35:29,075 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T20:35:29,076 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T20:35:29,076 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T20:35:29,076 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T20:35:29,077 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T20:35:29,077 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T20:35:29,077 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T20:35:29,080 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T20:35:29,081 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T20:35:29,087 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T20:35:29,088 INFO [master/40c018648b21:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T20:35:29,088 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T20:35:29,098 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T20:35:29,098 INFO [master/40c018648b21:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T20:35:29,100 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T20:35:29,108 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T20:35:29,109 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T20:35:29,119 DEBUG 
[master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T20:35:29,120 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T20:35:29,129 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T20:35:29,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T20:35:29,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T20:35:29,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:29,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:29,141 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=40c018648b21,37849,1731789328708, sessionid=0x101455c0ce00000, setting cluster-up flag (Was=false) 2024-11-16T20:35:29,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:29,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:29,193 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T20:35:29,195 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=40c018648b21,37849,1731789328708 2024-11-16T20:35:29,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:29,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:29,245 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T20:35:29,248 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=40c018648b21,37849,1731789328708 2024-11-16T20:35:29,250 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T20:35:29,253 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T20:35:29,254 INFO [master/40c018648b21:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T20:35:29,254 INFO [master/40c018648b21:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T20:35:29,255 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 40c018648b21,37849,1731789328708 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T20:35:29,257 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:35:29,257 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:35:29,257 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:35:29,257 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:35:29,257 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/40c018648b21:0, corePoolSize=10, maxPoolSize=10 2024-11-16T20:35:29,258 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:29,258 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/40c018648b21:0, corePoolSize=2, maxPoolSize=2 2024-11-16T20:35:29,258 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/40c018648b21:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T20:35:29,259 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731789359259 2024-11-16T20:35:29,259 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T20:35:29,259 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T20:35:29,259 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T20:35:29,259 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T20:35:29,259 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T20:35:29,260 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T20:35:29,260 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:29,260 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:35:29,260 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T20:35:29,260 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T20:35:29,260 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T20:35:29,261 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T20:35:29,261 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T20:35:29,261 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T20:35:29,261 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789329261,5,FailOnTimeoutGroup] 2024-11-16T20:35:29,262 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789329262,5,FailOnTimeoutGroup] 2024-11-16T20:35:29,262 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:29,262 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T20:35:29,262 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:29,262 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:29,262 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:29,262 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T20:35:29,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39211 is added to blk_1073741831_1007 (size=1321) 2024-11-16T20:35:29,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46125 is added to blk_1073741831_1007 (size=1321) 2024-11-16T20:35:29,272 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T20:35:29,272 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65 2024-11-16T20:35:29,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46125 is added to blk_1073741832_1008 (size=32) 2024-11-16T20:35:29,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39211 is added to blk_1073741832_1008 (size=32) 2024-11-16T20:35:29,281 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:35:29,282 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T20:35:29,284 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T20:35:29,284 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:29,284 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:35:29,285 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T20:35:29,286 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T20:35:29,286 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:29,287 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:35:29,287 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T20:35:29,288 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T20:35:29,288 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:29,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:35:29,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T20:35:29,290 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T20:35:29,291 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:29,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:35:29,291 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T20:35:29,292 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740 2024-11-16T20:35:29,292 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740 2024-11-16T20:35:29,293 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T20:35:29,294 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T20:35:29,294 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
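The FlushLargeStoresPolicy entry just above falls back to memStoreFlushSize divided by the number of column families (16.0 M for the four-family hbase:meta region) because hbase.hregion.percolumnfamilyflush.size.lower.bound is unset. A minimal sketch of pinning that bound explicitly via the site configuration (the value is an assumption taken from the fallback this run logged, not a recommendation):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch only: set the per-column-family flush lower bound instead of letting
    // FlushLargeStoresPolicy derive it from flush size / number of families.
    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // 16 MB, matching the fallback reported in the log line above.
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
        System.out.println(conf.getLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", -1L));
      }
    }

The same property can also be carried in a table descriptor, which is where the policy looks first according to the message above.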
2024-11-16T20:35:29,295 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T20:35:29,297 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:35:29,298 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=758757, jitterRate=-0.0351911336183548}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T20:35:29,299 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731789329281Initializing all the Stores at 1731789329282 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789329282Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789329282Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789329282Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789329282Cleaning up temporary data from old regions at 1731789329294 (+12 ms)Region opened successfully at 1731789329299 (+5 ms) 2024-11-16T20:35:29,299 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T20:35:29,299 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T20:35:29,299 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T20:35:29,299 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T20:35:29,299 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T20:35:29,299 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T20:35:29,299 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731789329299Disabling compacts and flushes for region at 1731789329299Disabling writes for close at 1731789329299Writing region close 
event to WAL at 1731789329299Closed at 1731789329299 2024-11-16T20:35:29,301 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:35:29,301 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T20:35:29,301 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T20:35:29,302 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T20:35:29,303 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T20:35:29,311 INFO [RS:0;40c018648b21:46419 {}] regionserver.HRegionServer(746): ClusterId : 9fe809fb-a4f5-43c8-bad7-7b8b0a651d6b 2024-11-16T20:35:29,311 DEBUG [RS:0;40c018648b21:46419 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T20:35:29,320 DEBUG [RS:0;40c018648b21:46419 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T20:35:29,320 DEBUG [RS:0;40c018648b21:46419 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T20:35:29,331 DEBUG [RS:0;40c018648b21:46419 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T20:35:29,331 DEBUG [RS:0;40c018648b21:46419 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48d15746, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=40c018648b21/172.17.0.2:0 2024-11-16T20:35:29,342 DEBUG [RS:0;40c018648b21:46419 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;40c018648b21:46419 2024-11-16T20:35:29,342 INFO [RS:0;40c018648b21:46419 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T20:35:29,342 INFO [RS:0;40c018648b21:46419 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T20:35:29,342 DEBUG [RS:0;40c018648b21:46419 {}] regionserver.HRegionServer(832): About to register with Master. 
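The store descriptors printed earlier for the master store and hbase:meta show the 'info' family configured with a ROWCOL bloom filter, ROW_INDEX_V1 block encoding, 8 KB blocks, in-memory caching and three versions. A minimal sketch of building an equivalent column family descriptor programmatically (illustrative only; the family name is reused from the log, nothing else is taken from this run):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch only: mirror the 'info' family settings reported in the descriptors above.
    public class InfoFamilySketch {
      public static void main(String[] args) {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8192)
            .setInMemory(true)
            .build();
        System.out.println(info);
      }
    }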
2024-11-16T20:35:29,343 INFO [RS:0;40c018648b21:46419 {}] regionserver.HRegionServer(2659): reportForDuty to master=40c018648b21,37849,1731789328708 with port=46419, startcode=1731789328882 2024-11-16T20:35:29,344 DEBUG [RS:0;40c018648b21:46419 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T20:35:29,345 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49411, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T20:35:29,346 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37849 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 40c018648b21,46419,1731789328882 2024-11-16T20:35:29,346 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37849 {}] master.ServerManager(517): Registering regionserver=40c018648b21,46419,1731789328882 2024-11-16T20:35:29,348 DEBUG [RS:0;40c018648b21:46419 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65 2024-11-16T20:35:29,348 DEBUG [RS:0;40c018648b21:46419 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33297 2024-11-16T20:35:29,348 DEBUG [RS:0;40c018648b21:46419 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T20:35:29,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T20:35:29,362 DEBUG [RS:0;40c018648b21:46419 {}] zookeeper.ZKUtil(111): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/40c018648b21,46419,1731789328882 2024-11-16T20:35:29,362 WARN [RS:0;40c018648b21:46419 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T20:35:29,362 INFO [RS:0;40c018648b21:46419 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:35:29,362 DEBUG [RS:0;40c018648b21:46419 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882 2024-11-16T20:35:29,362 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [40c018648b21,46419,1731789328882] 2024-11-16T20:35:29,366 INFO [RS:0;40c018648b21:46419 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T20:35:29,368 INFO [RS:0;40c018648b21:46419 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T20:35:29,369 INFO [RS:0;40c018648b21:46419 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T20:35:29,369 INFO [RS:0;40c018648b21:46419 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-16T20:35:29,369 INFO [RS:0;40c018648b21:46419 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T20:35:29,370 INFO [RS:0;40c018648b21:46419 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T20:35:29,370 INFO [RS:0;40c018648b21:46419 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:29,370 DEBUG [RS:0;40c018648b21:46419 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:29,370 DEBUG [RS:0;40c018648b21:46419 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:29,370 DEBUG [RS:0;40c018648b21:46419 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:29,370 DEBUG [RS:0;40c018648b21:46419 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:29,371 DEBUG [RS:0;40c018648b21:46419 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:29,371 DEBUG [RS:0;40c018648b21:46419 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/40c018648b21:0, corePoolSize=2, maxPoolSize=2 2024-11-16T20:35:29,371 DEBUG [RS:0;40c018648b21:46419 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:29,371 DEBUG [RS:0;40c018648b21:46419 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:29,371 DEBUG [RS:0;40c018648b21:46419 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:29,371 DEBUG [RS:0;40c018648b21:46419 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:29,371 DEBUG [RS:0;40c018648b21:46419 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:29,371 DEBUG [RS:0;40c018648b21:46419 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:29,371 DEBUG [RS:0;40c018648b21:46419 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/40c018648b21:0, corePoolSize=3, maxPoolSize=3 2024-11-16T20:35:29,371 DEBUG [RS:0;40c018648b21:46419 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0, corePoolSize=3, maxPoolSize=3 2024-11-16T20:35:29,372 INFO [RS:0;40c018648b21:46419 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
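The "Chore ScheduledChore name=... is enabled." entries above all follow the same ChoreService/ScheduledChore pattern. A minimal sketch of that pattern, under the assumption that the three-argument ScheduledChore constructor takes its period in milliseconds (the chore name, period and simple Stoppable below are illustrative, not taken from this run):

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    // Sketch only: define a periodic chore and hand it to a ChoreService.
    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ScheduledChore tick = new ScheduledChore("exampleChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");  // periodic work goes here
          }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(tick);           // runs until the service shuts down
        Thread.sleep(3000);
        service.shutdown();
      }
    }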
2024-11-16T20:35:29,372 INFO [RS:0;40c018648b21:46419 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:29,372 INFO [RS:0;40c018648b21:46419 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:29,372 INFO [RS:0;40c018648b21:46419 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:29,372 INFO [RS:0;40c018648b21:46419 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:29,372 INFO [RS:0;40c018648b21:46419 {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,46419,1731789328882-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T20:35:29,387 INFO [RS:0;40c018648b21:46419 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T20:35:29,387 INFO [RS:0;40c018648b21:46419 {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,46419,1731789328882-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:29,387 INFO [RS:0;40c018648b21:46419 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:29,387 INFO [RS:0;40c018648b21:46419 {}] regionserver.Replication(171): 40c018648b21,46419,1731789328882 started 2024-11-16T20:35:29,424 INFO [RS:0;40c018648b21:46419 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:29,425 INFO [RS:0;40c018648b21:46419 {}] regionserver.HRegionServer(1482): Serving as 40c018648b21,46419,1731789328882, RpcServer on 40c018648b21/172.17.0.2:46419, sessionid=0x101455c0ce00001 2024-11-16T20:35:29,425 DEBUG [RS:0;40c018648b21:46419 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T20:35:29,425 DEBUG [RS:0;40c018648b21:46419 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 40c018648b21,46419,1731789328882 2024-11-16T20:35:29,425 DEBUG [RS:0;40c018648b21:46419 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '40c018648b21,46419,1731789328882' 2024-11-16T20:35:29,425 DEBUG [RS:0;40c018648b21:46419 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T20:35:29,426 DEBUG [RS:0;40c018648b21:46419 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T20:35:29,426 DEBUG [RS:0;40c018648b21:46419 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T20:35:29,426 DEBUG [RS:0;40c018648b21:46419 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T20:35:29,426 DEBUG [RS:0;40c018648b21:46419 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 40c018648b21,46419,1731789328882 2024-11-16T20:35:29,426 DEBUG [RS:0;40c018648b21:46419 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '40c018648b21,46419,1731789328882' 2024-11-16T20:35:29,426 DEBUG [RS:0;40c018648b21:46419 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T20:35:29,427 DEBUG 
[RS:0;40c018648b21:46419 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T20:35:29,427 DEBUG [RS:0;40c018648b21:46419 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T20:35:29,427 INFO [RS:0;40c018648b21:46419 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T20:35:29,427 INFO [RS:0;40c018648b21:46419 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T20:35:29,454 WARN [40c018648b21:37849 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-16T20:35:29,530 INFO [RS:0;40c018648b21:46419 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C46419%2C1731789328882, suffix=, logDir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882, archiveDir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/oldWALs, maxLogs=32 2024-11-16T20:35:29,531 INFO [RS:0;40c018648b21:46419 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C46419%2C1731789328882.1731789329530 2024-11-16T20:35:29,537 INFO [RS:0;40c018648b21:46419 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789329530 2024-11-16T20:35:29,540 DEBUG [RS:0;40c018648b21:46419 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37525:37525),(127.0.0.1/127.0.0.1:33289:33289)] 2024-11-16T20:35:29,704 DEBUG [40c018648b21:37849 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T20:35:29,705 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=40c018648b21,46419,1731789328882 2024-11-16T20:35:29,707 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 40c018648b21,46419,1731789328882, state=OPENING 2024-11-16T20:35:29,719 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T20:35:29,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:29,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:35:29,731 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T20:35:29,732 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:35:29,732 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:35:29,732 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=40c018648b21,46419,1731789328882}] 2024-11-16T20:35:29,887 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T20:35:29,890 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49833, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T20:35:29,894 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T20:35:29,894 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:35:29,897 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C46419%2C1731789328882.meta, suffix=.meta, logDir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882, archiveDir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/oldWALs, maxLogs=32 2024-11-16T20:35:29,898 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta 2024-11-16T20:35:29,905 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta 2024-11-16T20:35:29,907 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33289:33289),(127.0.0.1/127.0.0.1:37525:37525)] 2024-11-16T20:35:29,908 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T20:35:29,909 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T20:35:29,909 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T20:35:29,909 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
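The coprocessor loaded a few entries below this point (MultiRowMutationEndpoint, carried in the hbase:meta table descriptor as coprocessor$1) is attached through the table descriptor. A minimal sketch of attaching the same endpoint class to a descriptor; the table and family names are assumptions for illustration:

    import java.io.IOException;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Sketch only: carry a coprocessor endpoint in a table descriptor, as hbase:meta does.
    public class CoprocessorAttachSketch {
      public static void main(String[] args) throws IOException {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
        System.out.println(td);
      }
    }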
2024-11-16T20:35:29,909 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T20:35:29,909 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:35:29,909 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T20:35:29,910 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T20:35:29,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T20:35:29,913 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T20:35:29,913 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:29,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:35:29,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T20:35:29,915 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T20:35:29,915 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:29,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:35:29,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T20:35:29,916 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T20:35:29,916 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:29,917 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:35:29,917 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T20:35:29,918 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T20:35:29,918 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:29,919 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
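The CompactionConfiguration entries repeated above (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2) are driven by standard hbase.hstore.compaction.* settings. A minimal sketch restating those logged values through the site configuration (restated for illustration, not recommendations):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch only: the configuration keys behind the CompactionConfiguration line above.
    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);            // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);           // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);     // selection ratio
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        System.out.println(conf.get("hbase.hstore.compaction.ratio"));
      }
    }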
2024-11-16T20:35:29,919 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T20:35:29,920 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740 2024-11-16T20:35:29,921 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740 2024-11-16T20:35:29,923 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T20:35:29,923 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T20:35:29,924 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T20:35:29,926 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T20:35:29,926 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878596, jitterRate=0.11719293892383575}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T20:35:29,926 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T20:35:29,927 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731789329910Writing region info on filesystem at 1731789329910Initializing all the Stores at 1731789329911 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789329911Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789329911Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789329911Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789329911Cleaning up temporary data from old regions at 1731789329923 (+12 ms)Running coprocessor post-open hooks at 1731789329926 (+3 ms)Region opened successfully at 1731789329927 (+1 ms) 2024-11-16T20:35:29,928 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731789329887 2024-11-16T20:35:29,931 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T20:35:29,931 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T20:35:29,931 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=40c018648b21,46419,1731789328882 2024-11-16T20:35:29,932 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 40c018648b21,46419,1731789328882, state=OPEN 2024-11-16T20:35:29,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T20:35:29,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T20:35:29,969 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=40c018648b21,46419,1731789328882 2024-11-16T20:35:29,969 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:35:29,969 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:35:29,972 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T20:35:29,972 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=40c018648b21,46419,1731789328882 in 237 msec 2024-11-16T20:35:29,975 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T20:35:29,975 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 671 msec 2024-11-16T20:35:29,976 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:35:29,976 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T20:35:29,978 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T20:35:29,978 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=40c018648b21,46419,1731789328882, seqNum=-1] 2024-11-16T20:35:29,978 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T20:35:29,979 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35951, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T20:35:29,986 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 733 msec 2024-11-16T20:35:29,986 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731789329986, completionTime=-1 2024-11-16T20:35:29,986 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T20:35:29,986 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T20:35:29,988 INFO [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T20:35:29,988 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731789389988 2024-11-16T20:35:29,988 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731789449988 2024-11-16T20:35:29,989 INFO [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-16T20:35:29,989 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,37849,1731789328708-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:29,989 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,37849,1731789328708-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:29,989 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,37849,1731789328708-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:29,989 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-40c018648b21:37849, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T20:35:29,989 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:29,989 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:29,991 DEBUG [master/40c018648b21:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T20:35:29,994 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.043sec 2024-11-16T20:35:29,994 INFO [master/40c018648b21:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T20:35:29,994 INFO [master/40c018648b21:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T20:35:29,994 INFO [master/40c018648b21:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T20:35:29,994 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T20:35:29,994 INFO [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T20:35:29,994 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,37849,1731789328708-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T20:35:29,994 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,37849,1731789328708-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T20:35:29,997 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T20:35:29,997 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T20:35:29,997 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,37849,1731789328708-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
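Note: the ChoreService entries above list the master's periodic chores (ClusterStatusChore, BalancerChore, RegionNormalizerChore, CatalogJanitor, HbckChore, and so on) together with their periods. As an illustrative sketch only, not part of the captured log, two of those periods correspond to well-known configuration keys; whether this particular test overrides them is an assumption, the 300000 ms values simply match what is logged.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MasterChorePeriodSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // BalancerChore period in milliseconds (300000 in the log above).
    conf.setInt("hbase.balancer.period", 300000);
    // CatalogJanitor period in milliseconds (300000 in the log above).
    conf.setInt("hbase.catalogjanitor.interval", 300000);
    System.out.println(conf.get("hbase.balancer.period"));
  }
}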
2024-11-16T20:35:30,021 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7982f2bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:35:30,021 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 40c018648b21,37849,-1 for getting cluster id 2024-11-16T20:35:30,021 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T20:35:30,023 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9fe809fb-a4f5-43c8-bad7-7b8b0a651d6b' 2024-11-16T20:35:30,023 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T20:35:30,023 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9fe809fb-a4f5-43c8-bad7-7b8b0a651d6b" 2024-11-16T20:35:30,024 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f633512, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:35:30,024 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [40c018648b21,37849,-1] 2024-11-16T20:35:30,024 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T20:35:30,025 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:35:30,026 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53332, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T20:35:30,027 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2be29fbd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:35:30,028 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T20:35:30,029 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=40c018648b21,46419,1731789328882, seqNum=-1] 2024-11-16T20:35:30,029 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T20:35:30,031 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42936, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T20:35:30,034 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=40c018648b21,37849,1731789328708 2024-11-16T20:35:30,034 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:30,037 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T20:35:30,053 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/40c018648b21:0 server-side Connection retries=45 2024-11-16T20:35:30,053 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:35:30,053 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T20:35:30,053 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T20:35:30,053 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:35:30,053 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T20:35:30,053 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T20:35:30,053 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T20:35:30,054 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39701 2024-11-16T20:35:30,055 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39701 connecting to ZooKeeper ensemble=127.0.0.1:50571 2024-11-16T20:35:30,056 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:30,057 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:35:30,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:397010x0, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T20:35:30,078 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-16T20:35:30,078 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:397010x0, quorum=127.0.0.1:50571, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-16T20:35:30,078 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39701-0x101455c0ce00002 connected 2024-11-16T20:35:30,078 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T20:35:30,079 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T20:35:30,079 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:39701-0x101455c0ce00002, quorum=127.0.0.1:50571, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T20:35:30,081 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39701-0x101455c0ce00002, quorum=127.0.0.1:50571, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T20:35:30,084 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39701 2024-11-16T20:35:30,084 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39701 2024-11-16T20:35:30,086 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39701 2024-11-16T20:35:30,086 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39701 2024-11-16T20:35:30,088 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39701 2024-11-16T20:35:30,090 INFO [RS:1;40c018648b21:39701 {}] regionserver.HRegionServer(746): ClusterId : 9fe809fb-a4f5-43c8-bad7-7b8b0a651d6b 2024-11-16T20:35:30,090 DEBUG [RS:1;40c018648b21:39701 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T20:35:30,099 DEBUG [RS:1;40c018648b21:39701 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T20:35:30,099 DEBUG [RS:1;40c018648b21:39701 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T20:35:30,109 DEBUG [RS:1;40c018648b21:39701 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T20:35:30,110 DEBUG [RS:1;40c018648b21:39701 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d444263, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=40c018648b21/172.17.0.2:0 2024-11-16T20:35:30,122 DEBUG [RS:1;40c018648b21:39701 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;40c018648b21:39701 2024-11-16T20:35:30,122 INFO [RS:1;40c018648b21:39701 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T20:35:30,122 INFO [RS:1;40c018648b21:39701 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T20:35:30,122 DEBUG [RS:1;40c018648b21:39701 {}] regionserver.HRegionServer(832): About to register with Master. 
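Note: the RpcExecutor lines above (default.FPBQ.Fifo, priority.RWQ.Fifo, replication.FPBQ.Fifo, metaPriority.FPBQ.Fifo) show the second region server starting with only three default handlers and call queues capped at 30 entries, which is typical for a mini-cluster test. The snippet below is an illustrative sketch, not part of the captured log; that this test sets these exact keys is an assumption, the values simply mirror handlerCount=3 and maxQueueLength=30 from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcHandlerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Number of default RPC handlers per region server ("handlerCount=3").
    conf.setInt("hbase.regionserver.handler.count", 3);
    // Hard cap on queued calls per call queue ("maxQueueLength=30").
    conf.setInt("hbase.ipc.server.max.callqueue.length", 30);
    System.out.println(conf.get("hbase.regionserver.handler.count"));
  }
}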
2024-11-16T20:35:30,123 INFO [RS:1;40c018648b21:39701 {}] regionserver.HRegionServer(2659): reportForDuty to master=40c018648b21,37849,1731789328708 with port=39701, startcode=1731789330052 2024-11-16T20:35:30,123 DEBUG [RS:1;40c018648b21:39701 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T20:35:30,125 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41543, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T20:35:30,125 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37849 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 40c018648b21,39701,1731789330052 2024-11-16T20:35:30,125 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37849 {}] master.ServerManager(517): Registering regionserver=40c018648b21,39701,1731789330052 2024-11-16T20:35:30,127 DEBUG [RS:1;40c018648b21:39701 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65 2024-11-16T20:35:30,127 DEBUG [RS:1;40c018648b21:39701 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33297 2024-11-16T20:35:30,127 DEBUG [RS:1;40c018648b21:39701 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T20:35:30,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T20:35:30,140 DEBUG [RS:1;40c018648b21:39701 {}] zookeeper.ZKUtil(111): regionserver:39701-0x101455c0ce00002, quorum=127.0.0.1:50571, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/40c018648b21,39701,1731789330052 2024-11-16T20:35:30,141 WARN [RS:1;40c018648b21:39701 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T20:35:30,141 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [40c018648b21,39701,1731789330052] 2024-11-16T20:35:30,141 INFO [RS:1;40c018648b21:39701 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:35:30,141 DEBUG [RS:1;40c018648b21:39701 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052 2024-11-16T20:35:30,145 INFO [RS:1;40c018648b21:39701 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T20:35:30,148 INFO [RS:1;40c018648b21:39701 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T20:35:30,148 INFO [RS:1;40c018648b21:39701 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T20:35:30,148 INFO [RS:1;40c018648b21:39701 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
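Note: the WALFactory line above shows this region server using the FSHLog-based write-ahead log provider (org.apache.hadoop.hbase.wal.FSHLogProvider). As an illustrative sketch, not part of the captured log, the provider is chosen through configuration roughly as below; the value "filesystem" selects FSHLogProvider, while "asyncfs" would select the asynchronous provider.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" maps to org.apache.hadoop.hbase.wal.FSHLogProvider,
    // the provider named in the log entry above.
    conf.set("hbase.wal.provider", "filesystem");
    System.out.println(conf.get("hbase.wal.provider"));
  }
}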
2024-11-16T20:35:30,148 INFO [RS:1;40c018648b21:39701 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T20:35:30,149 INFO [RS:1;40c018648b21:39701 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T20:35:30,150 INFO [RS:1;40c018648b21:39701 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:30,150 DEBUG [RS:1;40c018648b21:39701 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:30,150 DEBUG [RS:1;40c018648b21:39701 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:30,150 DEBUG [RS:1;40c018648b21:39701 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:30,150 DEBUG [RS:1;40c018648b21:39701 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:30,150 DEBUG [RS:1;40c018648b21:39701 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:30,150 DEBUG [RS:1;40c018648b21:39701 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/40c018648b21:0, corePoolSize=2, maxPoolSize=2 2024-11-16T20:35:30,150 DEBUG [RS:1;40c018648b21:39701 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:30,150 DEBUG [RS:1;40c018648b21:39701 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:30,151 DEBUG [RS:1;40c018648b21:39701 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:30,151 DEBUG [RS:1;40c018648b21:39701 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:30,151 DEBUG [RS:1;40c018648b21:39701 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:30,151 DEBUG [RS:1;40c018648b21:39701 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:35:30,151 DEBUG [RS:1;40c018648b21:39701 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/40c018648b21:0, corePoolSize=3, maxPoolSize=3 2024-11-16T20:35:30,151 DEBUG [RS:1;40c018648b21:39701 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0, corePoolSize=3, maxPoolSize=3 2024-11-16T20:35:30,152 INFO [RS:1;40c018648b21:39701 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-16T20:35:30,152 INFO [RS:1;40c018648b21:39701 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:30,152 INFO [RS:1;40c018648b21:39701 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:30,152 INFO [RS:1;40c018648b21:39701 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:30,152 INFO [RS:1;40c018648b21:39701 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:30,152 INFO [RS:1;40c018648b21:39701 {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,39701,1731789330052-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T20:35:30,174 INFO [RS:1;40c018648b21:39701 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T20:35:30,174 INFO [RS:1;40c018648b21:39701 {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,39701,1731789330052-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:30,175 INFO [RS:1;40c018648b21:39701 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:30,175 INFO [RS:1;40c018648b21:39701 {}] regionserver.Replication(171): 40c018648b21,39701,1731789330052 started 2024-11-16T20:35:30,191 INFO [RS:1;40c018648b21:39701 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:35:30,191 INFO [RS:1;40c018648b21:39701 {}] regionserver.HRegionServer(1482): Serving as 40c018648b21,39701,1731789330052, RpcServer on 40c018648b21/172.17.0.2:39701, sessionid=0x101455c0ce00002 2024-11-16T20:35:30,191 DEBUG [RS:1;40c018648b21:39701 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T20:35:30,191 DEBUG [RS:1;40c018648b21:39701 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 40c018648b21,39701,1731789330052 2024-11-16T20:35:30,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;40c018648b21:39701,5,FailOnTimeoutGroup] 2024-11-16T20:35:30,192 DEBUG [RS:1;40c018648b21:39701 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '40c018648b21,39701,1731789330052' 2024-11-16T20:35:30,192 DEBUG [RS:1;40c018648b21:39701 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T20:35:30,192 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-16T20:35:30,192 DEBUG [RS:1;40c018648b21:39701 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T20:35:30,192 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T20:35:30,193 DEBUG [RS:1;40c018648b21:39701 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T20:35:30,193 DEBUG [RS:1;40c018648b21:39701 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T20:35:30,193 DEBUG [RS:1;40c018648b21:39701 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
40c018648b21,39701,1731789330052 2024-11-16T20:35:30,193 DEBUG [RS:1;40c018648b21:39701 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '40c018648b21,39701,1731789330052' 2024-11-16T20:35:30,193 DEBUG [RS:1;40c018648b21:39701 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T20:35:30,193 DEBUG [RS:1;40c018648b21:39701 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T20:35:30,193 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 40c018648b21,37849,1731789328708 2024-11-16T20:35:30,193 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@60c9ef5b 2024-11-16T20:35:30,194 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T20:35:30,194 DEBUG [RS:1;40c018648b21:39701 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T20:35:30,194 INFO [RS:1;40c018648b21:39701 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T20:35:30,194 INFO [RS:1;40c018648b21:39701 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T20:35:30,196 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53348, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T20:35:30,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37849 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T20:35:30,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37849 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
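Note: the two TableDescriptorChecker warnings above fire because the table about to be created uses a very small region max file size (786432 bytes) and memstore flush size (8192 bytes), presumably so the test can trigger flushes and log rolls quickly. The snippet below is an illustrative sketch, not part of the captured log, showing the configuration keys the warnings refer to; whether the test sets them through configuration or through the table descriptor is not visible here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallRegionSizeSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Region split threshold in bytes (786432 in the warning above).
    conf.setLong("hbase.hregion.max.filesize", 786432L);
    // Memstore flush threshold in bytes (8192 in the warning above).
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
    System.out.println(conf.get("hbase.hregion.max.filesize"));
  }
}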
2024-11-16T20:35:30,197 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37849 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T20:35:30,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37849 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T20:35:30,200 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T20:35:30,200 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:30,200 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37849 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-16T20:35:30,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37849 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T20:35:30,202 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T20:35:30,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46125 is added to blk_1073741835_1011 (size=393) 2024-11-16T20:35:30,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39211 is added to blk_1073741835_1011 (size=393) 2024-11-16T20:35:30,216 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cad6a923190659cbe955b214dd2ed352, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65 2024-11-16T20:35:30,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46125 is added to blk_1073741836_1012 (size=76) 2024-11-16T20:35:30,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39211 is added to blk_1073741836_1012 (size=76) 2024-11-16T20:35:30,224 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:35:30,224 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing cad6a923190659cbe955b214dd2ed352, disabling compactions & flushes 2024-11-16T20:35:30,224 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. 2024-11-16T20:35:30,224 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. 2024-11-16T20:35:30,224 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. after waiting 0 ms 2024-11-16T20:35:30,225 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. 2024-11-16T20:35:30,225 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. 2024-11-16T20:35:30,225 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for cad6a923190659cbe955b214dd2ed352: Waiting for close lock at 1731789330224Disabling compacts and flushes for region at 1731789330224Disabling writes for close at 1731789330224Writing region close event to WAL at 1731789330225 (+1 ms)Closed at 1731789330225 2024-11-16T20:35:30,226 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T20:35:30,227 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731789330227"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731789330227"}]},"ts":"1731789330227"} 2024-11-16T20:35:30,230 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
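Note: pid=4 above is the CreateTableProcedure for TestLogRolling-testLogRollOnDatanodeDeath, driven by the client request logged at 20:35:30,197. As an illustrative sketch only, not part of the captured log, the corresponding client-side call looks roughly like this; connection details are assumed, while the table name and 'info' family match the descriptor shown in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master's CreateTableProcedure completes,
      // i.e. until the "Operation: CREATE ... completed" stage seen later.
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .build());
    }
  }
}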
2024-11-16T20:35:30,232 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T20:35:30,232 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731789330232"}]},"ts":"1731789330232"} 2024-11-16T20:35:30,235 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-16T20:35:30,235 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=cad6a923190659cbe955b214dd2ed352, ASSIGN}] 2024-11-16T20:35:30,237 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=cad6a923190659cbe955b214dd2ed352, ASSIGN 2024-11-16T20:35:30,238 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=cad6a923190659cbe955b214dd2ed352, ASSIGN; state=OFFLINE, location=40c018648b21,46419,1731789328882; forceNewPlan=false, retain=false 2024-11-16T20:35:30,296 INFO [RS:1;40c018648b21:39701 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C39701%2C1731789330052, suffix=, logDir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052, archiveDir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/oldWALs, maxLogs=32 2024-11-16T20:35:30,298 INFO [RS:1;40c018648b21:39701 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C39701%2C1731789330052.1731789330297 2024-11-16T20:35:30,304 INFO [RS:1;40c018648b21:39701 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 2024-11-16T20:35:30,309 DEBUG [RS:1;40c018648b21:39701 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33289:33289),(127.0.0.1/127.0.0.1:37525:37525)] 2024-11-16T20:35:30,389 INFO [40c018648b21:37849 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
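Note: the AbstractFSWAL entry above reports the WAL configuration for the new region server: blocksize=256 MB, rollsize=128 MB (block size times the roll multiplier, 0.5 by default) and maxLogs=32. The snippet below is an illustrative sketch, not part of the captured log, listing the configuration keys behind those numbers; treating the logged values as defaults rather than explicit test settings is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollSizeSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // WAL block size in bytes; roll size = block size * multiplier
    // (128 MB = 256 MB * 0.5, matching the log entry above).
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Number of WAL files allowed before flushes are forced ("maxLogs=32").
    conf.setInt("hbase.regionserver.maxlogs", 32);
    System.out.println(conf.get("hbase.regionserver.hlog.blocksize"));
  }
}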
2024-11-16T20:35:30,390 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cad6a923190659cbe955b214dd2ed352, regionState=OPENING, regionLocation=40c018648b21,46419,1731789328882 2024-11-16T20:35:30,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=cad6a923190659cbe955b214dd2ed352, ASSIGN because future has completed 2024-11-16T20:35:30,397 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cad6a923190659cbe955b214dd2ed352, server=40c018648b21,46419,1731789328882}] 2024-11-16T20:35:30,557 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. 2024-11-16T20:35:30,557 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => cad6a923190659cbe955b214dd2ed352, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352.', STARTKEY => '', ENDKEY => ''} 2024-11-16T20:35:30,558 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath cad6a923190659cbe955b214dd2ed352 2024-11-16T20:35:30,558 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:35:30,558 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for cad6a923190659cbe955b214dd2ed352 2024-11-16T20:35:30,558 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for cad6a923190659cbe955b214dd2ed352 2024-11-16T20:35:30,560 INFO [StoreOpener-cad6a923190659cbe955b214dd2ed352-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region cad6a923190659cbe955b214dd2ed352 2024-11-16T20:35:30,561 INFO [StoreOpener-cad6a923190659cbe955b214dd2ed352-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cad6a923190659cbe955b214dd2ed352 columnFamilyName info 2024-11-16T20:35:30,562 DEBUG [StoreOpener-cad6a923190659cbe955b214dd2ed352-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:35:30,562 INFO [StoreOpener-cad6a923190659cbe955b214dd2ed352-1 {}] regionserver.HStore(327): Store=cad6a923190659cbe955b214dd2ed352/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:35:30,562 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for cad6a923190659cbe955b214dd2ed352 2024-11-16T20:35:30,563 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352 2024-11-16T20:35:30,564 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352 2024-11-16T20:35:30,564 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for cad6a923190659cbe955b214dd2ed352 2024-11-16T20:35:30,564 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for cad6a923190659cbe955b214dd2ed352 2024-11-16T20:35:30,566 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for cad6a923190659cbe955b214dd2ed352 2024-11-16T20:35:30,569 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:35:30,569 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened cad6a923190659cbe955b214dd2ed352; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733716, jitterRate=-0.06703241169452667}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T20:35:30,569 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cad6a923190659cbe955b214dd2ed352 2024-11-16T20:35:30,570 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for cad6a923190659cbe955b214dd2ed352: Running coprocessor pre-open hook at 1731789330558Writing region info on filesystem at 1731789330558Initializing all the Stores at 1731789330559 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789330559Cleaning up temporary data from old regions at 1731789330564 (+5 ms)Running coprocessor post-open hooks at 1731789330569 (+5 ms)Region opened successfully at 1731789330570 (+1 ms) 2024-11-16T20:35:30,571 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352., pid=6, masterSystemTime=1731789330553 2024-11-16T20:35:30,574 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. 2024-11-16T20:35:30,574 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. 2024-11-16T20:35:30,575 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cad6a923190659cbe955b214dd2ed352, regionState=OPEN, openSeqNum=2, regionLocation=40c018648b21,46419,1731789328882 2024-11-16T20:35:30,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cad6a923190659cbe955b214dd2ed352, server=40c018648b21,46419,1731789328882 because future has completed 2024-11-16T20:35:30,582 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T20:35:30,582 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure cad6a923190659cbe955b214dd2ed352, server=40c018648b21,46419,1731789328882 in 182 msec 2024-11-16T20:35:30,585 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T20:35:30,585 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=cad6a923190659cbe955b214dd2ed352, ASSIGN in 347 msec 2024-11-16T20:35:30,586 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T20:35:30,586 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731789330586"}]},"ts":"1731789330586"} 2024-11-16T20:35:30,588 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-16T20:35:30,590 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T20:35:30,592 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 393 msec 2024-11-16T20:35:31,194 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T20:35:31,199 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:35:31,221 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:35:31,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:35:31,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:35:35,143 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T20:35:35,143 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T20:35:35,144 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T20:35:35,144 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-16T20:35:35,145 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T20:35:35,145 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T20:35:35,366 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-16T20:35:40,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37849 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T20:35:40,224 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-16T20:35:40,225 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-16T20:35:40,229 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T20:35:40,229 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. 2024-11-16T20:35:40,247 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:35:40,251 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:35:40,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:35:40,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:35:40,252 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:35:40,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ad9bbfc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:35:40,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30008f24{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:35:40,359 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f55aa3b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/java.io.tmpdir/jetty-localhost-42003-hadoop-hdfs-3_4_1-tests_jar-_-any-13471842411242413816/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:35:40,359 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6bd8b5f{HTTP/1.1, (http/1.1)}{localhost:42003} 2024-11-16T20:35:40,359 INFO [Time-limited test {}] server.Server(415): Started @122192ms 2024-11-16T20:35:40,361 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:35:40,409 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:35:40,415 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:35:40,420 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:35:40,420 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:35:40,421 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:35:40,429 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23038dc2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:35:40,429 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f85c2b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:35:40,546 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@57d6f5a1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/java.io.tmpdir/jetty-localhost-36239-hadoop-hdfs-3_4_1-tests_jar-_-any-11821805139876529336/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:35:40,547 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6a2bfd90{HTTP/1.1, (http/1.1)}{localhost:36239} 2024-11-16T20:35:40,547 INFO [Time-limited test {}] server.Server(415): Started @122379ms 2024-11-16T20:35:40,549 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:35:40,590 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:35:40,594 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:35:40,595 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:35:40,595 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:35:40,595 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T20:35:40,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7524e7e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:35:40,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b21f544{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:35:40,699 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@463983fb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/java.io.tmpdir/jetty-localhost-36297-hadoop-hdfs-3_4_1-tests_jar-_-any-5863725836453339447/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:35:40,700 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@57fef5ae{HTTP/1.1, (http/1.1)}{localhost:36297} 2024-11-16T20:35:40,700 INFO [Time-limited test {}] server.Server(415): Started @122532ms 2024-11-16T20:35:40,701 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:35:42,018 WARN [Thread-867 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data5/current/BP-1083962379-172.17.0.2-1731789326364/current, will proceed with Du for space computation calculation, 2024-11-16T20:35:42,019 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data6/current/BP-1083962379-172.17.0.2-1731789326364/current, will proceed with Du for space computation calculation, 2024-11-16T20:35:42,038 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T20:35:42,040 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc71eaa52f5b146ff with lease ID 0x455d738420b546e1: Processing first storage report for DS-61c766fc-84d8-4f41-b5fc-76f4226f4319 from datanode DatanodeRegistration(127.0.0.1:34869, datanodeUuid=057d21b3-d974-4667-9e19-feb20522c62e, infoPort=35253, infoSecurePort=0, ipcPort=42249, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364) 2024-11-16T20:35:42,040 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc71eaa52f5b146ff with lease ID 0x455d738420b546e1: from storage DS-61c766fc-84d8-4f41-b5fc-76f4226f4319 node DatanodeRegistration(127.0.0.1:34869, datanodeUuid=057d21b3-d974-4667-9e19-feb20522c62e, infoPort=35253, infoSecurePort=0, ipcPort=42249, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:35:42,041 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc71eaa52f5b146ff with lease ID 0x455d738420b546e1: Processing first storage report for DS-dd7f6e7a-0c17-4128-bf7a-fc80bbabcbf0 from datanode DatanodeRegistration(127.0.0.1:34869, datanodeUuid=057d21b3-d974-4667-9e19-feb20522c62e, infoPort=35253, infoSecurePort=0, ipcPort=42249, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364) 2024-11-16T20:35:42,041 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc71eaa52f5b146ff with lease ID 0x455d738420b546e1: from storage DS-dd7f6e7a-0c17-4128-bf7a-fc80bbabcbf0 node DatanodeRegistration(127.0.0.1:34869, datanodeUuid=057d21b3-d974-4667-9e19-feb20522c62e, infoPort=35253, infoSecurePort=0, ipcPort=42249, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:35:42,199 WARN [Thread-878 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data7/current/BP-1083962379-172.17.0.2-1731789326364/current, will proceed with Du for space computation calculation, 2024-11-16T20:35:42,199 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data8/current/BP-1083962379-172.17.0.2-1731789326364/current, will proceed with Du for space computation calculation, 2024-11-16T20:35:42,224 WARN [Thread-830 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T20:35:42,226 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9b915d9feae37402 with lease ID 0x455d738420b546e2: Processing first storage report for DS-1631de22-0b9f-4017-baf0-41d8a91735fd from datanode DatanodeRegistration(127.0.0.1:35281, datanodeUuid=caa29ef5-220e-47df-9a82-70d13952f572, infoPort=44333, infoSecurePort=0, ipcPort=42843, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364) 2024-11-16T20:35:42,226 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9b915d9feae37402 with lease ID 0x455d738420b546e2: from storage DS-1631de22-0b9f-4017-baf0-41d8a91735fd node DatanodeRegistration(127.0.0.1:35281, datanodeUuid=caa29ef5-220e-47df-9a82-70d13952f572, infoPort=44333, infoSecurePort=0, ipcPort=42843, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:35:42,226 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9b915d9feae37402 with lease ID 0x455d738420b546e2: Processing first storage report for DS-12d00f89-232e-4fb1-880c-f368e98bfb1e from datanode DatanodeRegistration(127.0.0.1:35281, datanodeUuid=caa29ef5-220e-47df-9a82-70d13952f572, infoPort=44333, infoSecurePort=0, ipcPort=42843, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364) 2024-11-16T20:35:42,226 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9b915d9feae37402 with lease ID 0x455d738420b546e2: from storage DS-12d00f89-232e-4fb1-880c-f368e98bfb1e node DatanodeRegistration(127.0.0.1:35281, datanodeUuid=caa29ef5-220e-47df-9a82-70d13952f572, infoPort=44333, infoSecurePort=0, ipcPort=42843, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:35:42,273 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data9/current/BP-1083962379-172.17.0.2-1731789326364/current, will proceed with Du for space computation calculation, 2024-11-16T20:35:42,273 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data10/current/BP-1083962379-172.17.0.2-1731789326364/current, will proceed with Du for space computation calculation, 2024-11-16T20:35:42,297 WARN [Thread-852 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T20:35:42,299 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6768a95cd3879e6 with lease ID 0x455d738420b546e3: Processing first storage report for DS-37acd09e-e5ac-4527-955d-3c1799e61593 from datanode DatanodeRegistration(127.0.0.1:45663, datanodeUuid=c84f37a5-7c14-44a3-b5c9-ee2f1d14ec6a, infoPort=45957, infoSecurePort=0, ipcPort=42013, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364) 2024-11-16T20:35:42,300 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6768a95cd3879e6 with lease ID 0x455d738420b546e3: from storage DS-37acd09e-e5ac-4527-955d-3c1799e61593 node DatanodeRegistration(127.0.0.1:45663, datanodeUuid=c84f37a5-7c14-44a3-b5c9-ee2f1d14ec6a, infoPort=45957, infoSecurePort=0, ipcPort=42013, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:35:42,300 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6768a95cd3879e6 with lease ID 0x455d738420b546e3: Processing first storage report for DS-2c4f7f8b-1355-46a1-b0e3-acc1e64c1583 from datanode DatanodeRegistration(127.0.0.1:45663, datanodeUuid=c84f37a5-7c14-44a3-b5c9-ee2f1d14ec6a, infoPort=45957, infoSecurePort=0, ipcPort=42013, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364) 2024-11-16T20:35:42,300 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6768a95cd3879e6 with lease ID 0x455d738420b546e3: from storage DS-2c4f7f8b-1355-46a1-b0e3-acc1e64c1583 node DatanodeRegistration(127.0.0.1:45663, datanodeUuid=c84f37a5-7c14-44a3-b5c9-ee2f1d14ec6a, infoPort=45957, infoSecurePort=0, ipcPort=42013, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:35:42,332 WARN [ResponseProcessor for block BP-1083962379-172.17.0.2-1731789326364:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1083962379-172.17.0.2-1731789326364:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:42,332 WARN [ResponseProcessor for block BP-1083962379-172.17.0.2-1731789326364:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1083962379-172.17.0.2-1731789326364:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:35:42,332 WARN [ResponseProcessor for block BP-1083962379-172.17.0.2-1731789326364:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1083962379-172.17.0.2-1731789326364:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:42,333 WARN [ResponseProcessor for block BP-1083962379-172.17.0.2-1731789326364:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1083962379-172.17.0.2-1731789326364:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1083962379-172.17.0.2-1731789326364:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:42,333 WARN [DataStreamer for file /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789329530 block BP-1083962379-172.17.0.2-1731789326364:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]) is bad. 2024-11-16T20:35:42,333 WARN [DataStreamer for file /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta block BP-1083962379-172.17.0.2-1731789326364:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK], DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]) is bad. 2024-11-16T20:35:42,334 WARN [DataStreamer for file /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/WALs/40c018648b21,37849,1731789328708/40c018648b21%2C37849%2C1731789328708.1731789329038 block BP-1083962379-172.17.0.2-1731789326364:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK], DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]) is bad. 
2024-11-16T20:35:42,333 WARN [PacketResponder: BP-1083962379-172.17.0.2-1731789326364:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39211] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:42,334 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:58978 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46125:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58978 dst: /127.0.0.1:46125 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T20:35:42,334 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1690800550_22 at /127.0.0.1:41008 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39211:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41008 dst: /127.0.0.1:39211 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:42,334 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_11921011_22 at /127.0.0.1:40938 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39211:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40938 dst: /127.0.0.1:39211 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:42,334 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:58966 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46125:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58966 dst: /127.0.0.1:46125 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T20:35:42,335 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_11921011_22 at /127.0.0.1:58946 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46125:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58946 dst: /127.0.0.1:46125 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:42,335 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:40968 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39211:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40968 dst: /127.0.0.1:39211 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:42,335 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1690800550_22 at /127.0.0.1:59024 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:46125:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59024 dst: /127.0.0.1:46125 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:42,333 WARN [DataStreamer for file /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 block BP-1083962379-172.17.0.2-1731789326364:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK], DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]) is bad. 2024-11-16T20:35:42,334 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:40978 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39211:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40978 dst: /127.0.0.1:39211 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:42,338 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6cd7b3e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:35:42,339 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ff95875{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:35:42,339 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:35:42,339 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32403ac6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:35:42,339 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f7f19bf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.log.dir/,STOPPED} 2024-11-16T20:35:42,341 WARN [BP-1083962379-172.17.0.2-1731789326364 heartbeating to localhost/127.0.0.1:33297 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:35:42,341 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T20:35:42,341 WARN [BP-1083962379-172.17.0.2-1731789326364 heartbeating to localhost/127.0.0.1:33297 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1083962379-172.17.0.2-1731789326364 (Datanode Uuid f19a5063-ec0a-4807-a891-570d666b0316) service to localhost/127.0.0.1:33297 2024-11-16T20:35:42,341 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:35:42,342 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data3/current/BP-1083962379-172.17.0.2-1731789326364 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:35:42,342 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data4/current/BP-1083962379-172.17.0.2-1731789326364 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:35:42,342 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:35:42,344 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@6e4ede9d {}] datanode.DataXceiver(331): 127.0.0.1:46125:DataXceiver error processing unknown operation src: /127.0.0.1:34928 dst: /127.0.0.1:46125 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:42,344 WARN [DataStreamer for file /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 block BP-1083962379-172.17.0.2-1731789326364:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:35:42,344 WARN [DataStreamer for file /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta block BP-1083962379-172.17.0.2-1731789326364:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:42,344 WARN [DataStreamer for file /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789329530 block BP-1083962379-172.17.0.2-1731789326364:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:42,345 WARN [DataStreamer for file /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/WALs/40c018648b21,37849,1731789328708/40c018648b21%2C37849%2C1731789328708.1731789329038 block BP-1083962379-172.17.0.2-1731789326364:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:42,346 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c77eea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:35:42,346 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e20426d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:35:42,346 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:35:42,346 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bf32f74{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:35:42,347 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2cb9bebc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.log.dir/,STOPPED} 2024-11-16T20:35:42,348 WARN [BP-1083962379-172.17.0.2-1731789326364 heartbeating to localhost/127.0.0.1:33297 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:35:42,348 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T20:35:42,348 WARN [BP-1083962379-172.17.0.2-1731789326364 heartbeating to localhost/127.0.0.1:33297 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1083962379-172.17.0.2-1731789326364 (Datanode Uuid 3f2b0ad1-22e5-4f8e-adc2-0f54d4046b23) service to localhost/127.0.0.1:33297 2024-11-16T20:35:42,348 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:35:42,349 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data1/current/BP-1083962379-172.17.0.2-1731789326364 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:35:42,349 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data2/current/BP-1083962379-172.17.0.2-1731789326364 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:35:42,349 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:35:42,353 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352., hostname=40c018648b21,46419,1731789328882, seqNum=2] 2024-11-16T20:35:42,355 ERROR [FSHLog-0-hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65-prefix:40c018648b21,46419,1731789328882 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:42,355 WARN [FSHLog-0-hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65-prefix:40c018648b21,46419,1731789328882 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:42,355 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 40c018648b21%2C46419%2C1731789328882:(num 1731789329530) roll requested 2024-11-16T20:35:42,356 INFO [regionserver/40c018648b21:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C46419%2C1731789328882.1731789342355 2024-11-16T20:35:42,367 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:42,367 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:42,368 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:42,368 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:42,368 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:42,368 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789329530 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789342355 2024-11-16T20:35:42,368 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:42,369 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:35:42,370 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-16T20:35:42,370 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-16T20:35:42,370 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789329530 2024-11-16T20:35:42,373 WARN [IPC Server handler 1 on default port 33297 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789329530 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-16T20:35:42,374 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45957:45957),(127.0.0.1/127.0.0.1:35253:35253)] 2024-11-16T20:35:42,375 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789329530 is not closed yet, will try archiving it next time 2024-11-16T20:35:42,375 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:42,376 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789329530 after 4ms 2024-11-16T20:35:43,013 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:35:44,152 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:44,375 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:44,376 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789342355 2024-11-16T20:35:44,377 WARN [ResponseProcessor for block BP-1083962379-172.17.0.2-1731789326364:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1083962379-172.17.0.2-1731789326364:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:44,378 WARN [DataStreamer for file /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789342355 block BP-1083962379-172.17.0.2-1731789326364:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK], DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]) is bad. 
2024-11-16T20:35:44,378 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:41762 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:45663:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41762 dst: /127.0.0.1:45663 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:44,379 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:58458 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:34869:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58458 dst: /127.0.0.1:34869 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:44,414 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@463983fb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:35:44,414 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@57fef5ae{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:35:44,414 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:35:44,414 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b21f544{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:35:44,415 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7524e7e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.log.dir/,STOPPED} 2024-11-16T20:35:44,416 WARN [BP-1083962379-172.17.0.2-1731789326364 heartbeating to localhost/127.0.0.1:33297 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:35:44,416 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T20:35:44,416 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:35:44,416 WARN [BP-1083962379-172.17.0.2-1731789326364 heartbeating to localhost/127.0.0.1:33297 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1083962379-172.17.0.2-1731789326364 (Datanode Uuid c84f37a5-7c14-44a3-b5c9-ee2f1d14ec6a) service to localhost/127.0.0.1:33297 2024-11-16T20:35:44,416 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data9/current/BP-1083962379-172.17.0.2-1731789326364 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:35:44,417 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data10/current/BP-1083962379-172.17.0.2-1731789326364 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:35:44,417 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:35:45,014 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:46,153 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:46,375 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:46,376 WARN [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]] 2024-11-16T20:35:46,376 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 40c018648b21%2C46419%2C1731789328882:(num 1731789342355) roll requested 2024-11-16T20:35:46,376 INFO [regionserver/40c018648b21:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C46419%2C1731789328882.1731789346376 2024-11-16T20:35:46,377 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789329530 after 4007ms 2024-11-16T20:35:46,379 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:46,379 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK], DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]) is bad. 2024-11-16T20:35:46,379 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741839_1021 2024-11-16T20:35:46,382 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK] 2024-11-16T20:35:46,386 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45663 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:46,386 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:60958 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data8]'}, localName='127.0.0.1:35281', datanodeUuid='caa29ef5-220e-47df-9a82-70d13952f572', xmitsInProgress=0}:Exception transferring block BP-1083962379-172.17.0.2-1731789326364:blk_1073741840_1022 to mirror 127.0.0.1:45663 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:46,386 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK], DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]) is bad. 2024-11-16T20:35:46,386 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741840_1022 2024-11-16T20:35:46,386 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:60958 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-16T20:35:46,387 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:60958 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:35281:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60958 dst: /127.0.0.1:35281 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:46,387 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK] 2024-11-16T20:35:46,389 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46125 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:46,389 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:58472 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data6]'}, localName='127.0.0.1:34869', datanodeUuid='057d21b3-d974-4667-9e19-feb20522c62e', xmitsInProgress=0}:Exception transferring block BP-1083962379-172.17.0.2-1731789326364:blk_1073741841_1023 to mirror 127.0.0.1:46125 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:46,390 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK], DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]) is bad. 2024-11-16T20:35:46,390 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741841_1023 2024-11-16T20:35:46,390 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:58472 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T20:35:46,390 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:58472 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:34869:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58472 dst: /127.0.0.1:34869 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T20:35:46,390 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK] 2024-11-16T20:35:46,398 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:46,398 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:46,399 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:46,399 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:46,399 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:46,399 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789342355 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789346376 2024-11-16T20:35:46,400 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44333:44333),(127.0.0.1/127.0.0.1:35253:35253)] 2024-11-16T20:35:46,400 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789329530 is not closed yet, will try archiving it next time 2024-11-16T20:35:46,400 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789342355 is not closed yet, will try archiving it next time 2024-11-16T20:35:46,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34869 is added to blk_1073741838_1020 (size=2431) 2024-11-16T20:35:46,421 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T20:35:46,802 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789329530 is not closed yet, will try archiving it next time 2024-11-16T20:35:47,014 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:48,153 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:48,400 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:48,425 WARN [ResponseProcessor for block BP-1083962379-172.17.0.2-1731789326364:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1083962379-172.17.0.2-1731789326364:blk_1073741842_1024 java.io.IOException: Bad response ERROR for BP-1083962379-172.17.0.2-1731789326364:blk_1073741842_1024 from datanode DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:48,426 WARN [DataStreamer for file /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789346376 block BP-1083962379-172.17.0.2-1731789326364:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK], DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]) is bad. 2024-11-16T20:35:48,426 WARN [PacketResponder: BP-1083962379-172.17.0.2-1731789326364:blk_1073741842_1024, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34869] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:48,426 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:60964 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:35281:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60964 dst: /127.0.0.1:35281 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:48,426 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:58476 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:34869:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58476 dst: /127.0.0.1:34869 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:48,495 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f55aa3b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:35:48,496 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6bd8b5f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:35:48,496 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:35:48,496 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30008f24{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:35:48,496 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ad9bbfc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.log.dir/,STOPPED} 2024-11-16T20:35:48,498 WARN [BP-1083962379-172.17.0.2-1731789326364 heartbeating to localhost/127.0.0.1:33297 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:35:48,498 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T20:35:48,498 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:35:48,498 WARN [BP-1083962379-172.17.0.2-1731789326364 heartbeating to localhost/127.0.0.1:33297 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1083962379-172.17.0.2-1731789326364 (Datanode Uuid 057d21b3-d974-4667-9e19-feb20522c62e) service to localhost/127.0.0.1:33297 2024-11-16T20:35:48,499 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data5/current/BP-1083962379-172.17.0.2-1731789326364 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:35:48,499 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data6/current/BP-1083962379-172.17.0.2-1731789326364 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:35:48,500 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:35:48,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46419 {}] regionserver.HRegion(8855): Flush requested on cad6a923190659cbe955b214dd2ed352 2024-11-16T20:35:48,511 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cad6a923190659cbe955b214dd2ed352 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T20:35:48,536 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/0960d3fe8bc84b538bef29ad4248d92e is 1080, key is row0002/info:/1731789344418/Put/seqid=0 2024-11-16T20:35:48,539 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45663 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:35:48,539 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:60978 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741843_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data8]'}, localName='127.0.0.1:35281', datanodeUuid='caa29ef5-220e-47df-9a82-70d13952f572', xmitsInProgress=0}:Exception transferring block BP-1083962379-172.17.0.2-1731789326364:blk_1073741843_1026 to mirror 127.0.0.1:45663 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:48,539 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK], DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]) is bad. 2024-11-16T20:35:48,539 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:60978 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741843_1026] {}] datanode.BlockReceiver(316): Block 1073741843 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T20:35:48,539 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741843_1026 2024-11-16T20:35:48,539 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:60978 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741843_1026] {}] datanode.DataXceiver(331): 127.0.0.1:35281:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60978 dst: /127.0.0.1:35281 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:48,540 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK] 2024-11-16T20:35:48,543 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39211 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:48,542 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:60982 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data8]'}, localName='127.0.0.1:35281', datanodeUuid='caa29ef5-220e-47df-9a82-70d13952f572', xmitsInProgress=0}:Exception transferring block BP-1083962379-172.17.0.2-1731789326364:blk_1073741844_1027 to mirror 127.0.0.1:39211 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T20:35:48,543 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]) is bad. 2024-11-16T20:35:48,543 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741844_1027 2024-11-16T20:35:48,543 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:60982 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T20:35:48,543 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:60982 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:35281:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60982 dst: /127.0.0.1:35281 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:48,544 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK] 2024-11-16T20:35:48,545 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:35:48,546 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK], DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]) is bad. 2024-11-16T20:35:48,546 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741845_1028 2024-11-16T20:35:48,547 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK] 2024-11-16T20:35:48,548 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:48,549 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK], DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]) is bad. 
2024-11-16T20:35:48,549 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741846_1029 2024-11-16T20:35:48,549 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK] 2024-11-16T20:35:48,550 WARN [IPC Server handler 3 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T20:35:48,550 WARN [IPC Server handler 3 on default port 33297 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T20:35:48,550 WARN [IPC Server handler 3 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T20:35:48,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741847_1030 (size=10347) 2024-11-16T20:35:48,954 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/0960d3fe8bc84b538bef29ad4248d92e 2024-11-16T20:35:48,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/0960d3fe8bc84b538bef29ad4248d92e as hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/0960d3fe8bc84b538bef29ad4248d92e 2024-11-16T20:35:48,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/0960d3fe8bc84b538bef29ad4248d92e, entries=5, sequenceid=11, filesize=10.1 K 2024-11-16T20:35:48,973 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for cad6a923190659cbe955b214dd2ed352 in 462ms, sequenceid=11, compaction requested=false 2024-11-16T20:35:48,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
cad6a923190659cbe955b214dd2ed352: 2024-11-16T20:35:49,014 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:49,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46419 {}] regionserver.HRegion(8855): Flush requested on cad6a923190659cbe955b214dd2ed352 2024-11-16T20:35:49,143 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cad6a923190659cbe955b214dd2ed352 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-16T20:35:49,149 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/70ce71f77c614a19adee97c5eb8436a9 is 1080, key is row0007/info:/1731789348513/Put/seqid=0 2024-11-16T20:35:49,151 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:49,151 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK], DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]) is bad. 
2024-11-16T20:35:49,151 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741848_1031 2024-11-16T20:35:49,152 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK] 2024-11-16T20:35:49,153 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:49,153 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]) is bad. 2024-11-16T20:35:49,153 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741849_1032 2024-11-16T20:35:49,154 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK] 2024-11-16T20:35:49,156 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:35:49,156 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK], DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]) is bad. 2024-11-16T20:35:49,156 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741850_1033 2024-11-16T20:35:49,157 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK] 2024-11-16T20:35:49,159 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:49,159 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK], DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]) is bad. 
2024-11-16T20:35:49,159 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741851_1034 2024-11-16T20:35:49,160 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK] 2024-11-16T20:35:49,161 WARN [IPC Server handler 3 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T20:35:49,161 WARN [IPC Server handler 3 on default port 33297 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T20:35:49,161 WARN [IPC Server handler 3 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T20:35:49,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741852_1035 (size=12506) 2024-11-16T20:35:49,566 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/70ce71f77c614a19adee97c5eb8436a9 2024-11-16T20:35:49,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/70ce71f77c614a19adee97c5eb8436a9 as hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/70ce71f77c614a19adee97c5eb8436a9 2024-11-16T20:35:49,582 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/70ce71f77c614a19adee97c5eb8436a9, entries=7, sequenceid=24, filesize=12.2 K 2024-11-16T20:35:49,583 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for cad6a923190659cbe955b214dd2ed352 in 441ms, sequenceid=24, compaction requested=false 2024-11-16T20:35:49,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
cad6a923190659cbe955b214dd2ed352: 2024-11-16T20:35:49,583 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-16T20:35:49,583 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:35:49,584 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/70ce71f77c614a19adee97c5eb8436a9 because midkey is the same as first or last row 2024-11-16T20:35:50,153 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:50,401 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:50,401 WARN [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]] 2024-11-16T20:35:50,401 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 40c018648b21%2C46419%2C1731789328882:(num 1731789346376) roll requested 2024-11-16T20:35:50,402 INFO [regionserver/40c018648b21:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C46419%2C1731789328882.1731789350401 2024-11-16T20:35:50,406 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:50,406 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK], DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]) is bad. 2024-11-16T20:35:50,406 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741853_1036 2024-11-16T20:35:50,407 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK] 2024-11-16T20:35:50,408 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:50,409 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK], DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]) is bad. 2024-11-16T20:35:50,409 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741854_1037 2024-11-16T20:35:50,410 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK] 2024-11-16T20:35:50,413 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45663 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:50,412 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:32776 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data8]'}, localName='127.0.0.1:35281', datanodeUuid='caa29ef5-220e-47df-9a82-70d13952f572', xmitsInProgress=0}:Exception transferring block BP-1083962379-172.17.0.2-1731789326364:blk_1073741855_1038 to mirror 127.0.0.1:45663 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:50,413 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK], DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]) is bad. 2024-11-16T20:35:50,413 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741855_1038 2024-11-16T20:35:50,413 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:32776 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-16T20:35:50,413 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:32776 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:35281:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32776 dst: /127.0.0.1:35281 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:50,414 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK] 2024-11-16T20:35:50,415 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:50,416 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK], DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]) is bad. 
2024-11-16T20:35:50,416 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741856_1039 2024-11-16T20:35:50,416 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK] 2024-11-16T20:35:50,417 WARN [IPC Server handler 1 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T20:35:50,417 WARN [IPC Server handler 1 on default port 33297 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T20:35:50,418 WARN [IPC Server handler 1 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T20:35:50,421 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:50,421 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:50,421 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:50,422 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:50,422 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:50,422 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789346376 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789350401 2024-11-16T20:35:50,423 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44333:44333)] 2024-11-16T20:35:50,423 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789329530 is not closed yet, will try archiving it next time 2024-11-16T20:35:50,423 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789346376 is not closed yet, will try archiving it next time 2024-11-16T20:35:50,424 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789342355 to hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/oldWALs/40c018648b21%2C46419%2C1731789328882.1731789342355 2024-11-16T20:35:50,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741842_1025 (size=25992) 2024-11-16T20:35:50,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46419 {}] regionserver.HRegion(8855): Flush requested on cad6a923190659cbe955b214dd2ed352 2024-11-16T20:35:50,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cad6a923190659cbe955b214dd2ed352 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T20:35:50,575 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/a1786a39b8684d48bf66d9c24d7c95cc is 1079, key is tmprow/info:/1731789350565/Put/seqid=0 2024-11-16T20:35:50,577 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:50,577 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK], DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]) is bad. 2024-11-16T20:35:50,577 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741858_1041 2024-11-16T20:35:50,578 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK] 2024-11-16T20:35:50,580 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45663 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:50,580 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:32796 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data8]'}, localName='127.0.0.1:35281', datanodeUuid='caa29ef5-220e-47df-9a82-70d13952f572', xmitsInProgress=0}:Exception transferring block BP-1083962379-172.17.0.2-1731789326364:blk_1073741859_1042 to mirror 127.0.0.1:45663 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:50,581 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK], DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]) is bad. 2024-11-16T20:35:50,581 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741859_1042 2024-11-16T20:35:50,581 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:32796 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T20:35:50,581 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:32796 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:35281:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32796 dst: /127.0.0.1:35281 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:50,581 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK] 2024-11-16T20:35:50,583 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:50,583 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK], DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]) is bad. 2024-11-16T20:35:50,583 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741860_1043 2024-11-16T20:35:50,583 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK] 2024-11-16T20:35:50,585 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:50,585 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK], DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]) is bad. 2024-11-16T20:35:50,585 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741861_1044 2024-11-16T20:35:50,586 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK] 2024-11-16T20:35:50,587 WARN [IPC Server handler 2 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T20:35:50,587 WARN [IPC Server handler 2 on default port 33297 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T20:35:50,587 WARN [IPC Server handler 2 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T20:35:50,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741862_1045 (size=6027) 2024-11-16T20:35:50,826 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789329530 is not closed yet, will try archiving it next time 2024-11-16T20:35:50,991 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/a1786a39b8684d48bf66d9c24d7c95cc 2024-11-16T20:35:50,999 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/a1786a39b8684d48bf66d9c24d7c95cc as hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/a1786a39b8684d48bf66d9c24d7c95cc 2024-11-16T20:35:51,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/a1786a39b8684d48bf66d9c24d7c95cc, entries=1, sequenceid=34, filesize=5.9 K 2024-11-16T20:35:51,008 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for cad6a923190659cbe955b214dd2ed352 in 441ms, sequenceid=34, compaction requested=true 2024-11-16T20:35:51,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for cad6a923190659cbe955b214dd2ed352: 2024-11-16T20:35:51,009 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-16T20:35:51,009 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:35:51,009 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/70ce71f77c614a19adee97c5eb8436a9 because midkey is the same as first or last row 2024-11-16T20:35:51,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cad6a923190659cbe955b214dd2ed352:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T20:35:51,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:35:51,009 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T20:35:51,011 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T20:35:51,011 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.HStore(1541): cad6a923190659cbe955b214dd2ed352/info is initiating minor compaction (all files) 2024-11-16T20:35:51,011 INFO [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of cad6a923190659cbe955b214dd2ed352/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. 
2024-11-16T20:35:51,011 INFO [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/0960d3fe8bc84b538bef29ad4248d92e, hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/70ce71f77c614a19adee97c5eb8436a9, hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/a1786a39b8684d48bf66d9c24d7c95cc] into tmpdir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp, totalSize=28.2 K 2024-11-16T20:35:51,012 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0960d3fe8bc84b538bef29ad4248d92e, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731789344418 2024-11-16T20:35:51,012 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] compactions.Compactor(225): Compacting 70ce71f77c614a19adee97c5eb8436a9, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731789348513 2024-11-16T20:35:51,013 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] compactions.Compactor(225): Compacting a1786a39b8684d48bf66d9c24d7c95cc, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731789350565 2024-11-16T20:35:51,015 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:51,029 INFO [RS:0;40c018648b21:46419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cad6a923190659cbe955b214dd2ed352#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T20:35:51,030 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/f5039cfa0e684093985bd480f0b7c9e1 is 1080, key is row0002/info:/1731789344418/Put/seqid=0 2024-11-16T20:35:51,032 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45663 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:51,032 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:32812 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data8]'}, localName='127.0.0.1:35281', datanodeUuid='caa29ef5-220e-47df-9a82-70d13952f572', xmitsInProgress=0}:Exception transferring block BP-1083962379-172.17.0.2-1731789326364:blk_1073741863_1046 to mirror 127.0.0.1:45663 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T20:35:51,033 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK], DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]) is bad. 2024-11-16T20:35:51,033 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:32812 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T20:35:51,033 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741863_1046 2024-11-16T20:35:51,033 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:32812 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:35281:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32812 dst: /127.0.0.1:35281 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:51,033 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK] 2024-11-16T20:35:51,035 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:35:51,035 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK], DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]) is bad. 2024-11-16T20:35:51,035 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741864_1047 2024-11-16T20:35:51,036 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK] 2024-11-16T20:35:51,037 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:51,037 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]) is bad. 2024-11-16T20:35:51,037 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741865_1048 2024-11-16T20:35:51,038 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK] 2024-11-16T20:35:51,039 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:51,039 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK], DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]) is bad. 2024-11-16T20:35:51,039 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741866_1049 2024-11-16T20:35:51,040 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK] 2024-11-16T20:35:51,041 WARN [IPC Server handler 3 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T20:35:51,041 WARN [IPC Server handler 3 on default port 33297 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T20:35:51,041 WARN [IPC Server handler 3 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T20:35:51,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741867_1050 (size=17994) 2024-11-16T20:35:51,233 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@447e22ba[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35281, datanodeUuid=caa29ef5-220e-47df-9a82-70d13952f572, infoPort=44333, infoSecurePort=0, ipcPort=42843, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364):Failed to transfer BP-1083962379-172.17.0.2-1731789326364:blk_1073741847_1030 to 127.0.0.1:45663 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:51,233 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@27b56a95[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35281, datanodeUuid=caa29ef5-220e-47df-9a82-70d13952f572, infoPort=44333, infoSecurePort=0, ipcPort=42843, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364):Failed to transfer BP-1083962379-172.17.0.2-1731789326364:blk_1073741852_1035 to 127.0.0.1:46125 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:51,457 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/f5039cfa0e684093985bd480f0b7c9e1 as hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/f5039cfa0e684093985bd480f0b7c9e1 2024-11-16T20:35:51,465 INFO [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in cad6a923190659cbe955b214dd2ed352/info of cad6a923190659cbe955b214dd2ed352 into f5039cfa0e684093985bd480f0b7c9e1(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-16T20:35:51,465 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for cad6a923190659cbe955b214dd2ed352: 2024-11-16T20:35:51,465 INFO [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352., storeName=cad6a923190659cbe955b214dd2ed352/info, priority=13, startTime=1731789351009; duration=0sec 2024-11-16T20:35:51,465 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T20:35:51,465 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:35:51,465 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/f5039cfa0e684093985bd480f0b7c9e1 because midkey is the same as first or last row 2024-11-16T20:35:51,465 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T20:35:51,466 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:35:51,466 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/f5039cfa0e684093985bd480f0b7c9e1 because midkey is the same as first or last row 2024-11-16T20:35:51,466 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T20:35:51,466 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:35:51,466 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/f5039cfa0e684093985bd480f0b7c9e1 because midkey is the same as first or last row 2024-11-16T20:35:51,466 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:35:51,466 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cad6a923190659cbe955b214dd2ed352:info 2024-11-16T20:35:51,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46419 {}] regionserver.HRegion(8855): Flush requested on cad6a923190659cbe955b214dd2ed352 2024-11-16T20:35:51,989 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cad6a923190659cbe955b214dd2ed352 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T20:35:51,995 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/d7a2097c1a204068882ae16482ed157d is 1079, key is tmprow/info:/1731789351987/Put/seqid=0 2024-11-16T20:35:51,998 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:51,998 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK], DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]) is bad. 2024-11-16T20:35:51,998 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741868_1051 2024-11-16T20:35:51,999 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK] 2024-11-16T20:35:52,000 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
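The flush recorded above was requested because puts to the 'info' family of TestLogRolling-testLogRollOnDatanodeDeath filled the memstore. For orientation, a small client-side put-then-flush cycle of the same shape can be sketched with the standard HBase client API; the table and family names are taken from the log, the row and value are illustrative, and a running cluster is assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(tn);
                 Admin admin = conn.getAdmin()) {
                // Write a ~1 KB cell to the 'info' family, similar to the tmprow puts in the log.
                Put put = new Put(Bytes.toBytes("tmprow"));
                put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), new byte[1024]);
                table.put(put);
                // Force the memstore out as an HFile under .tmp and commit it into the store,
                // as the MemStoreFlusher records above show.
                admin.flush(tn);
            }
        }
    }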
2024-11-16T20:35:52,001 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK], DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]) is bad. 2024-11-16T20:35:52,001 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741869_1052 2024-11-16T20:35:52,001 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK] 2024-11-16T20:35:52,003 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:52,003 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK], DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]) is bad. 2024-11-16T20:35:52,003 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741870_1053 2024-11-16T20:35:52,003 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK] 2024-11-16T20:35:52,006 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34869 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:35:52,006 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:47300 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data8]'}, localName='127.0.0.1:35281', datanodeUuid='caa29ef5-220e-47df-9a82-70d13952f572', xmitsInProgress=0}:Exception transferring block BP-1083962379-172.17.0.2-1731789326364:blk_1073741871_1054 to mirror 127.0.0.1:34869 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:52,006 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK], DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]) is bad. 2024-11-16T20:35:52,006 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741871_1054 2024-11-16T20:35:52,006 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:47300 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T20:35:52,006 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:47300 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:35281:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47300 dst: /127.0.0.1:35281 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:52,007 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK] 2024-11-16T20:35:52,007 WARN [IPC Server handler 0 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T20:35:52,007 WARN [IPC Server handler 0 on default port 33297 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T20:35:52,008 WARN [IPC Server handler 0 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T20:35:52,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741872_1055 (size=6027) 2024-11-16T20:35:52,154 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
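The NameNode warnings above reduce to simple arithmetic: the block needs 2 DISK replicas, but after the writer has excluded the unreachable datanodes only one usable DISK storage (127.0.0.1:35281) remains, so placement stops one replica short. A hypothetical helper restating that check; placementPossible, liveDiskStorages and excluded are illustrative names, not HDFS API.

    import java.util.Set;

    final class PlacementSketch {
        /**
         * Placement can only succeed if enough live DISK storages remain once the
         * datanodes already excluded by the writer are removed. In this test the
         * cluster has 5 datanodes, 4 of them unreachable, and replication=2, so
         * this returns false, matching "still in need of 1 to reach 2".
         */
        static boolean placementPossible(Set<String> liveDiskStorages,
                                         Set<String> excluded,
                                         int replication) {
            long usable = liveDiskStorages.stream()
                    .filter(dn -> !excluded.contains(dn))
                    .count();
            return usable >= replication;
        }
    }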
2024-11-16T20:35:52,227 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@447e22ba[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35281, datanodeUuid=caa29ef5-220e-47df-9a82-70d13952f572, infoPort=44333, infoSecurePort=0, ipcPort=42843, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364):Failed to transfer BP-1083962379-172.17.0.2-1731789326364:blk_1073741862_1045 to 127.0.0.1:46125 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:52,227 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@27b56a95[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35281, datanodeUuid=caa29ef5-220e-47df-9a82-70d13952f572, infoPort=44333, infoSecurePort=0, ipcPort=42843, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364):Failed to transfer BP-1083962379-172.17.0.2-1731789326364:blk_1073741842_1025 to 127.0.0.1:46125 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T20:35:52,412 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/d7a2097c1a204068882ae16482ed157d 2024-11-16T20:35:52,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/d7a2097c1a204068882ae16482ed157d as hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/d7a2097c1a204068882ae16482ed157d 2024-11-16T20:35:52,424 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:52,424 WARN [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]] 2024-11-16T20:35:52,424 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 40c018648b21%2C46419%2C1731789328882:(num 1731789350401) roll requested 2024-11-16T20:35:52,424 INFO [regionserver/40c018648b21:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C46419%2C1731789328882.1731789352424 2024-11-16T20:35:52,425 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/d7a2097c1a204068882ae16482ed157d, entries=1, sequenceid=45, filesize=5.9 K 2024-11-16T20:35:52,427 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for cad6a923190659cbe955b214dd2ed352 in 437ms, sequenceid=45, compaction requested=false 2024-11-16T20:35:52,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for cad6a923190659cbe955b214dd2ed352: 2024-11-16T20:35:52,427 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-16T20:35:52,427 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:35:52,427 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/f5039cfa0e684093985bd480f0b7c9e1 because midkey is the same as first or last row 2024-11-16T20:35:52,427 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:52,427 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK], DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]) is bad. 
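The split-check records above make two independent decisions: the size test passes (store size 23.5 K exceeds the 16.0 K threshold), but no split point can be chosen because the largest store file's midkey equals its first or last row. A simplified restatement of those two checks follows; it only paraphrases what the log says and is not the actual ConstantSizeRegionSplitPolicy or StoreUtils code, and shouldSplit/chooseSplitRow are illustrative names.

    import java.util.Arrays;

    final class SplitCheckSketch {
        /** Size test from the log: split when the summed store size exceeds the threshold. */
        static boolean shouldSplit(long sumSizeBytes, long sizeToCheckBytes) {
            return sumSizeBytes > sizeToCheckBytes;   // 23.5 K > 16.0 K -> true
        }

        /**
         * Split-point test from the log: the midkey of the largest store file is only a
         * usable split row if it differs from both the first and the last row; otherwise
         * one daughter region would be empty and the region is left unsplit.
         */
        static byte[] chooseSplitRow(byte[] firstRow, byte[] midKey, byte[] lastRow) {
            if (Arrays.equals(midKey, firstRow) || Arrays.equals(midKey, lastRow)) {
                return null;  // "cannot split ... because midkey is the same as first or last row"
            }
            return midKey;
        }
    }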
2024-11-16T20:35:52,427 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741873_1056 2024-11-16T20:35:52,428 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK] 2024-11-16T20:35:52,430 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39211 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:52,430 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:47318 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data8]'}, localName='127.0.0.1:35281', datanodeUuid='caa29ef5-220e-47df-9a82-70d13952f572', xmitsInProgress=0}:Exception transferring block BP-1083962379-172.17.0.2-1731789326364:blk_1073741874_1057 to mirror 127.0.0.1:39211 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:52,430 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]) is bad. 
2024-11-16T20:35:52,430 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741874_1057 2024-11-16T20:35:52,430 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:47318 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T20:35:52,430 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:47318 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:35281:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47318 dst: /127.0.0.1:35281 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:52,431 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK] 2024-11-16T20:35:52,432 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:52,432 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK], DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]) is bad. 
2024-11-16T20:35:52,432 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741875_1058 2024-11-16T20:35:52,433 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK] 2024-11-16T20:35:52,436 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46125 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:52,436 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:47320 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data8]'}, localName='127.0.0.1:35281', datanodeUuid='caa29ef5-220e-47df-9a82-70d13952f572', xmitsInProgress=0}:Exception transferring block BP-1083962379-172.17.0.2-1731789326364:blk_1073741876_1059 to mirror 127.0.0.1:46125 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:52,436 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK], DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]) is bad. 
2024-11-16T20:35:52,436 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:47320 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T20:35:52,436 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741876_1059 2024-11-16T20:35:52,436 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:47320 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:35281:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47320 dst: /127.0.0.1:35281 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T20:35:52,437 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK] 2024-11-16T20:35:52,437 WARN [IPC Server handler 0 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T20:35:52,437 WARN [IPC Server handler 0 on default port 33297 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T20:35:52,437 WARN [IPC Server handler 0 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T20:35:52,440 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:52,440 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:52,440 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:52,440 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:52,440 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:52,441 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789350401 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789352424 2024-11-16T20:35:52,442 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44333:44333)] 2024-11-16T20:35:52,442 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789329530 is not closed yet, will try archiving it next time 2024-11-16T20:35:52,442 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789350401 is not closed yet, will try archiving it next time 2024-11-16T20:35:52,442 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789346376 to 
hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/oldWALs/40c018648b21%2C46419%2C1731789328882.1731789346376 2024-11-16T20:35:52,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741857_1040 (size=13591) 2024-11-16T20:35:52,443 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789329530 is not closed yet, will try archiving it next time 2024-11-16T20:35:53,015 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:53,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46419 {}] regionserver.HRegion(8855): Flush requested on cad6a923190659cbe955b214dd2ed352 2024-11-16T20:35:53,412 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cad6a923190659cbe955b214dd2ed352 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T20:35:53,417 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/96d76f3d237c45e8a37117d4491ec0a3 is 1079, key is tmprow/info:/1731789353411/Put/seqid=0 2024-11-16T20:35:53,419 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
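The roll completed above is driven by the check logged at 20:35:52,424: the WAL's current pipeline has shrunk to a single replica while at least two are expected, so the writer is closed, a new WAL file (...1731789352424) is opened, and fully-flushed older WALs are archived to oldWALs. A hedged restatement of that trigger condition; checkLowReplication, minReplication and requestLogRoll are illustrative names, not the FSHLog API.

    final class WalRollSketch {
        interface Roller { void requestLogRoll(); }   // stand-in for the log-roller hook

        /**
         * Mirrors the decision in the 20:35:52,424 WARN record: if the number of
         * datanodes left in the WAL's output pipeline drops below the configured
         * minimum (2 in this test), ask the roller to close the current WAL and
         * open a new one on a fresh pipeline.
         */
        static void checkLowReplication(int currentPipelineSize, int minReplication, Roller roller) {
            if (currentPipelineSize < minReplication) {
                // "Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL."
                roller.requestLogRoll();
            }
        }
    }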
2024-11-16T20:35:53,419 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK], DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]) is bad. 2024-11-16T20:35:53,419 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741878_1061 2024-11-16T20:35:53,420 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK] 2024-11-16T20:35:53,421 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:53,421 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK], DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]) is bad. 2024-11-16T20:35:53,421 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741879_1062 2024-11-16T20:35:53,422 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK] 2024-11-16T20:35:53,424 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39211 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:35:53,424 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:47338 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data8]'}, localName='127.0.0.1:35281', datanodeUuid='caa29ef5-220e-47df-9a82-70d13952f572', xmitsInProgress=0}:Exception transferring block BP-1083962379-172.17.0.2-1731789326364:blk_1073741880_1063 to mirror 127.0.0.1:39211 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:53,424 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]) is bad. 2024-11-16T20:35:53,424 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:47338 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T20:35:53,424 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741880_1063 2024-11-16T20:35:53,424 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:47338 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:35281:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47338 dst: /127.0.0.1:35281 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:53,425 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK] 2024-11-16T20:35:53,428 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34869 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:53,428 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:47352 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741881_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data8]'}, localName='127.0.0.1:35281', datanodeUuid='caa29ef5-220e-47df-9a82-70d13952f572', xmitsInProgress=0}:Exception transferring block BP-1083962379-172.17.0.2-1731789326364:blk_1073741881_1064 to mirror 127.0.0.1:34869 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T20:35:53,428 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK], DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]) is bad. 2024-11-16T20:35:53,428 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741881_1064 2024-11-16T20:35:53,428 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:47352 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741881_1064] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T20:35:53,428 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:47352 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741881_1064] {}] datanode.DataXceiver(331): 127.0.0.1:35281:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47352 dst: /127.0.0.1:35281 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T20:35:53,428 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK] 2024-11-16T20:35:53,429 WARN [IPC Server handler 1 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T20:35:53,429 WARN [IPC Server handler 1 on default port 33297 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T20:35:53,429 WARN [IPC Server handler 1 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T20:35:53,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741882_1065 (size=6027) 2024-11-16T20:35:53,833 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/96d76f3d237c45e8a37117d4491ec0a3 2024-11-16T20:35:53,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/96d76f3d237c45e8a37117d4491ec0a3 as hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/96d76f3d237c45e8a37117d4491ec0a3 2024-11-16T20:35:53,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/96d76f3d237c45e8a37117d4491ec0a3, entries=1, sequenceid=55, filesize=5.9 K 2024-11-16T20:35:53,848 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for cad6a923190659cbe955b214dd2ed352 in 436ms, sequenceid=55, compaction requested=true 2024-11-16T20:35:53,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for cad6a923190659cbe955b214dd2ed352: 2024-11-16T20:35:53,849 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-16T20:35:53,849 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:35:53,849 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/f5039cfa0e684093985bd480f0b7c9e1 because midkey is the same as first or last row 2024-11-16T20:35:53,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cad6a923190659cbe955b214dd2ed352:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T20:35:53,849 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T20:35:53,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:35:53,850 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T20:35:53,850 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.HStore(1541): cad6a923190659cbe955b214dd2ed352/info is initiating minor compaction (all files) 2024-11-16T20:35:53,850 INFO [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of cad6a923190659cbe955b214dd2ed352/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. 
2024-11-16T20:35:53,851 INFO [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/f5039cfa0e684093985bd480f0b7c9e1, hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/d7a2097c1a204068882ae16482ed157d, hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/96d76f3d237c45e8a37117d4491ec0a3] into tmpdir=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp, totalSize=29.3 K 2024-11-16T20:35:53,851 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] compactions.Compactor(225): Compacting f5039cfa0e684093985bd480f0b7c9e1, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731789344418 2024-11-16T20:35:53,852 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] compactions.Compactor(225): Compacting d7a2097c1a204068882ae16482ed157d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731789351987 2024-11-16T20:35:53,852 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] compactions.Compactor(225): Compacting 96d76f3d237c45e8a37117d4491ec0a3, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731789353411 2024-11-16T20:35:53,871 INFO [RS:0;40c018648b21:46419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cad6a923190659cbe955b214dd2ed352#info#compaction#24 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T20:35:53,872 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/65d9edd67a1d43daa64287af2d6be5aa is 1080, key is row0002/info:/1731789344418/Put/seqid=0 2024-11-16T20:35:53,874 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:35:53,874 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]) is bad. 2024-11-16T20:35:53,874 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741883_1066 2024-11-16T20:35:53,875 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK] 2024-11-16T20:35:53,876 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:53,876 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK], DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]) is bad. 2024-11-16T20:35:53,877 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741884_1067 2024-11-16T20:35:53,877 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK] 2024-11-16T20:35:53,878 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:53,879 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK], DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]) is bad. 2024-11-16T20:35:53,879 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741885_1068 2024-11-16T20:35:53,879 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK] 2024-11-16T20:35:53,880 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:53,881 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK], DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]) is bad. 
2024-11-16T20:35:53,881 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741886_1069 2024-11-16T20:35:53,881 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK] 2024-11-16T20:35:53,882 WARN [IPC Server handler 1 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T20:35:53,882 WARN [IPC Server handler 1 on default port 33297 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T20:35:53,882 WARN [IPC Server handler 1 on default port 33297 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T20:35:53,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741887_1070 (size=18097) 2024-11-16T20:35:54,154 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:54,227 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@447e22ba[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35281, datanodeUuid=caa29ef5-220e-47df-9a82-70d13952f572, infoPort=44333, infoSecurePort=0, ipcPort=42843, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364):Failed to transfer BP-1083962379-172.17.0.2-1731789326364:blk_1073741872_1055 to 127.0.0.1:34869 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:54,227 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@27b56a95[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35281, datanodeUuid=caa29ef5-220e-47df-9a82-70d13952f572, infoPort=44333, infoSecurePort=0, ipcPort=42843, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364):Failed to transfer BP-1083962379-172.17.0.2-1731789326364:blk_1073741867_1050 to 127.0.0.1:34869 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:54,298 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/65d9edd67a1d43daa64287af2d6be5aa as hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/65d9edd67a1d43daa64287af2d6be5aa 2024-11-16T20:35:54,307 INFO [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in cad6a923190659cbe955b214dd2ed352/info of cad6a923190659cbe955b214dd2ed352 into 65d9edd67a1d43daa64287af2d6be5aa(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-16T20:35:54,307 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for cad6a923190659cbe955b214dd2ed352: 2024-11-16T20:35:54,308 INFO [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352., storeName=cad6a923190659cbe955b214dd2ed352/info, priority=13, startTime=1731789353849; duration=0sec 2024-11-16T20:35:54,308 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-16T20:35:54,308 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:35:54,308 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/65d9edd67a1d43daa64287af2d6be5aa because midkey is the same as first or last row 2024-11-16T20:35:54,308 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-16T20:35:54,308 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:35:54,308 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/65d9edd67a1d43daa64287af2d6be5aa because midkey is the same as first or last row 2024-11-16T20:35:54,308 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-16T20:35:54,308 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:35:54,308 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/65d9edd67a1d43daa64287af2d6be5aa because midkey is the same as first or last row 2024-11-16T20:35:54,308 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:35:54,308 DEBUG [RS:0;40c018648b21:46419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cad6a923190659cbe955b214dd2ed352:info 2024-11-16T20:35:54,442 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:54,443 WARN [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-16T20:35:54,640 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:35:54,643 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:35:54,644 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:35:54,644 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:35:54,644 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:35:54,645 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@441dcfc4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:35:54,645 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@381443d3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:35:54,760 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14b00457{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/java.io.tmpdir/jetty-localhost-35487-hadoop-hdfs-3_4_1-tests_jar-_-any-4144161698618016513/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:35:54,760 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22c6c03b{HTTP/1.1, (http/1.1)}{localhost:35487} 2024-11-16T20:35:54,760 INFO [Time-limited test {}] server.Server(415): Started @136593ms 2024-11-16T20:35:54,762 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:35:55,015 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:55,197 WARN [Thread-988 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T20:35:55,205 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3f07523428c0cce with lease ID 0x455d738420b546e4: from storage DS-66984a25-bb75-45b5-9d89-6a192cfe26fd node DatanodeRegistration(127.0.0.1:34121, datanodeUuid=f19a5063-ec0a-4807-a891-570d666b0316, infoPort=40931, infoSecurePort=0, ipcPort=39417, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T20:35:55,205 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3f07523428c0cce with lease ID 0x455d738420b546e4: from storage DS-d3b67d13-672b-4d4c-b941-4a5bddb76fae node DatanodeRegistration(127.0.0.1:34121, datanodeUuid=f19a5063-ec0a-4807-a891-570d666b0316, infoPort=40931, infoSecurePort=0, ipcPort=39417, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:35:55,227 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@27b56a95[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35281, datanodeUuid=caa29ef5-220e-47df-9a82-70d13952f572, infoPort=44333, infoSecurePort=0, ipcPort=42843, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364):Failed to transfer BP-1083962379-172.17.0.2-1731789326364:blk_1073741882_1065 to 127.0.0.1:34869 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T20:35:55,227 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@447e22ba[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35281, datanodeUuid=caa29ef5-220e-47df-9a82-70d13952f572, infoPort=44333, infoSecurePort=0, ipcPort=42843, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364):Failed to transfer BP-1083962379-172.17.0.2-1731789326364:blk_1073741857_1040 to 127.0.0.1:34869 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:56,155 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:56,443 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:57,016 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:57,227 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@27b56a95[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35281, datanodeUuid=caa29ef5-220e-47df-9a82-70d13952f572, infoPort=44333, infoSecurePort=0, ipcPort=42843, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364):Failed to transfer BP-1083962379-172.17.0.2-1731789326364:blk_1073741887_1070 to 127.0.0.1:45663 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:35:58,155 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:58,443 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:35:58,688 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T20:35:59,016 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:59,261 ERROR [FSHLog-0-hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData-prefix:40c018648b21,37849,1731789328708 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:59,261 WARN [FSHLog-0-hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData-prefix:40c018648b21,37849,1731789328708 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:59,261 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 40c018648b21%2C37849%2C1731789328708:(num 1731789329038) roll requested 2024-11-16T20:35:59,262 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C37849%2C1731789328708.1731789359261 2024-11-16T20:35:59,265 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:59,265 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK], DatanodeInfoWithStorage[127.0.0.1:34121,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]) is bad. 2024-11-16T20:35:59,265 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741888_1071 2024-11-16T20:35:59,266 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK] 2024-11-16T20:35:59,267 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:59,267 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK], DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]) is bad. 
2024-11-16T20:35:59,267 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741889_1072 2024-11-16T20:35:59,268 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK] 2024-11-16T20:35:59,269 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:59,269 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741890_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK], DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK]) is bad. 2024-11-16T20:35:59,269 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741890_1073 2024-11-16T20:35:59,270 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45663,DS-37acd09e-e5ac-4527-955d-3c1799e61593,DISK] 2024-11-16T20:35:59,275 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:59,275 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:59,275 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:59,275 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:59,275 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:35:59,275 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/WALs/40c018648b21,37849,1731789328708/40c018648b21%2C37849%2C1731789328708.1731789329038 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/WALs/40c018648b21,37849,1731789328708/40c018648b21%2C37849%2C1731789328708.1731789359261 2024-11-16T20:35:59,276 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:59,276 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:35:59,276 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/WALs/40c018648b21,37849,1731789328708/40c018648b21%2C37849%2C1731789328708.1731789329038 2024-11-16T20:35:59,276 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40931:40931),(127.0.0.1/127.0.0.1:44333:44333)] 2024-11-16T20:35:59,276 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/WALs/40c018648b21,37849,1731789328708/40c018648b21%2C37849%2C1731789328708.1731789329038 is not closed yet, will try archiving it next time 2024-11-16T20:35:59,277 WARN [IPC Server handler 2 on default port 33297 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/WALs/40c018648b21,37849,1731789328708/40c018648b21%2C37849%2C1731789328708.1731789329038 has not been closed. Lease recovery is in progress. RecoveryId = 1075 for block blk_1073741830_1006 2024-11-16T20:35:59,277 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/WALs/40c018648b21,37849,1731789328708/40c018648b21%2C37849%2C1731789328708.1731789329038 after 1ms 2024-11-16T20:36:00,156 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:00,444 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:02,156 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:02,444 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:03,278 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/WALs/40c018648b21,37849,1731789328708/40c018648b21%2C37849%2C1731789328708.1731789329038 after 4002ms 2024-11-16T20:36:04,157 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:04,445 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:05,217 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@660d7800 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1083962379-172.17.0.2-1731789326364:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:46125,null,null]) java.net.ConnectException: Call From 40c018648b21/172.17.0.2 to localhost:44965 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-16T20:36:05,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34121 is added to blk_1073741833_1019 (size=455) 2024-11-16T20:36:05,394 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789329530 to hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/oldWALs/40c018648b21%2C46419%2C1731789328882.1731789329530 2024-11-16T20:36:05,396 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789350401 to hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/oldWALs/40c018648b21%2C46419%2C1731789328882.1731789350401 2024-11-16T20:36:06,157 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:06,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741833_1019 (size=455) 2024-11-16T20:36:06,445 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:08,107 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C46419%2C1731789328882.1731789368106 2024-11-16T20:36:08,112 WARN [Thread-1019 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34869 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:08,111 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_11921011_22 at /127.0.0.1:37240 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741892_1076] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data4]'}, localName='127.0.0.1:34121', datanodeUuid='f19a5063-ec0a-4807-a891-570d666b0316', xmitsInProgress=0}:Exception transferring block BP-1083962379-172.17.0.2-1731789326364:blk_1073741892_1076 to mirror 127.0.0.1:34869 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T20:36:08,112 WARN [Thread-1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34121,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK], DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]) is bad. 2024-11-16T20:36:08,112 WARN [Thread-1019 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741892_1076 2024-11-16T20:36:08,112 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_11921011_22 at /127.0.0.1:37240 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741892_1076] {}] datanode.BlockReceiver(316): Block 1073741892 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T20:36:08,112 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_11921011_22 at /127.0.0.1:37240 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741892_1076] {}] datanode.DataXceiver(331): 127.0.0.1:34121:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37240 dst: /127.0.0.1:34121 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
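The DataStreamer warnings above trace the HDFS client's write-pipeline recovery loop: a failed createBlockOutputStream is pinned on the first unreachable datanode in the pipeline, the block attempt is abandoned, the bad node goes onto an exclusion list, and the block is re-requested over the remaining nodes until none are left ("All datanodes ... are bad. Aborting..."). Below is a minimal standalone Java sketch of that exclude-and-retry pattern; the class and method names (PipelineWriter, allocatePipeline, writeBlock) are hypothetical and this is not the actual org.apache.hadoop.hdfs.DataStreamer code.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Illustrative sketch only: exclude-and-retry block allocation, loosely modeled on the
// "Abandoning ... / Excluding datanode ..." messages in the log above.
public class PipelineWriter {

    static class DatanodeUnreachable extends Exception {
        final String datanode;   // node that refused the connection
        DatanodeUnreachable(String datanode) {
            super("cannot reach " + datanode);
            this.datanode = datanode;
        }
    }

    private final Set<String> excluded = new HashSet<>();

    // Hypothetical stand-in for asking the namenode for a pipeline that avoids excluded nodes.
    private List<String> allocatePipeline(List<String> allDatanodes) {
        List<String> pipeline = new ArrayList<>();
        for (String dn : allDatanodes) {
            if (!excluded.contains(dn)) pipeline.add(dn);
        }
        return pipeline;
    }

    // Hypothetical stand-in for streaming a block through the pipeline.
    private void writeBlock(List<String> pipeline) throws DatanodeUnreachable {
        if (pipeline.contains("127.0.0.1:34869")) {   // simulate the dead datanode from the log
            throw new DatanodeUnreachable("127.0.0.1:34869");
        }
        System.out.println("block written via pipeline " + pipeline);
    }

    public void writeWithRetries(List<String> allDatanodes, int maxAttempts) {
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            List<String> pipeline = allocatePipeline(allDatanodes);
            if (pipeline.isEmpty()) {
                System.out.println("All datanodes are bad. Aborting...");   // mirrors the terminal error above
                return;
            }
            try {
                writeBlock(pipeline);
                return;
            } catch (DatanodeUnreachable e) {
                // Abandon this attempt and exclude the bad node, as the DataStreamer log lines do.
                System.out.println("Abandoning attempt " + attempt + ", excluding datanode " + e.datanode);
                excluded.add(e.datanode);
            }
        }
    }

    public static void main(String[] args) {
        new PipelineWriter().writeWithRetries(
            List.of("127.0.0.1:34869", "127.0.0.1:34121", "127.0.0.1:35281"), 3);
    }
}

In this toy run the single dead node is excluded after the first failure and the write succeeds on the second attempt, which is consistent with the test's WALs continuing to roll onto the two surviving datanodes.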
2024-11-16T20:36:08,113 WARN [Thread-1019 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK] 2024-11-16T20:36:08,118 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,118 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,118 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,118 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,118 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,118 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789352424 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789368106 2024-11-16T20:36:08,119 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40931:40931),(127.0.0.1/127.0.0.1:44333:44333)] 2024-11-16T20:36:08,119 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.1731789352424 is not closed yet, will try archiving it next time 2024-11-16T20:36:08,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741877_1060 (size=12911) 2024-11-16T20:36:08,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46419 {}] regionserver.HRegion(8855): Flush requested on cad6a923190659cbe955b214dd2ed352 2024-11-16T20:36:08,123 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cad6a923190659cbe955b214dd2ed352 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T20:36:08,128 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/5aa5af6bec8c4896a765664d02aff6fa is 1080, key is row0013/info:/1731789368120/Put/seqid=0 2024-11-16T20:36:08,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741894_1078 (size=8190) 2024-11-16T20:36:08,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34121 is added to blk_1073741894_1078 (size=8190) 2024-11-16T20:36:08,157 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:08,445 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:08,446 INFO [regionserver/40c018648b21:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-16T20:36:08,541 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/5aa5af6bec8c4896a765664d02aff6fa 2024-11-16T20:36:08,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T20:36:08,550 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T20:36:08,550 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:36:08,550 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:36:08,550 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:36:08,550 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T20:36:08,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/5aa5af6bec8c4896a765664d02aff6fa as hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/5aa5af6bec8c4896a765664d02aff6fa 2024-11-16T20:36:08,551 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T20:36:08,551 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=401360406, stopped=false 2024-11-16T20:36:08,551 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=40c018648b21,37849,1731789328708 2024-11-16T20:36:08,558 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/5aa5af6bec8c4896a765664d02aff6fa, entries=3, sequenceid=66, filesize=8.0 K 2024-11-16T20:36:08,559 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10758 for cad6a923190659cbe955b214dd2ed352 in 436ms, sequenceid=66, compaction requested=false 2024-11-16T20:36:08,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for cad6a923190659cbe955b214dd2ed352: 2024-11-16T20:36:08,560 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-16T20:36:08,560 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 
2024-11-16T20:36:08,560 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/65d9edd67a1d43daa64287af2d6be5aa because midkey is the same as first or last row 2024-11-16T20:36:08,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T20:36:08,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39701-0x101455c0ce00002, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T20:36:08,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T20:36:08,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:08,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:08,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39701-0x101455c0ce00002, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:08,618 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T20:36:08,618 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
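The split-policy DEBUG lines just above show both halves of the decision: the size check passes (sumSize=25.7 K exceeds sizeToCheck=16.0 K), but the split is refused because the candidate midkey equals the first or last row of the store file. A small self-contained sketch of those two checks, using hypothetical names rather than the real HBase split-policy classes:

import java.util.Arrays;

// Illustrative sketch of the two checks visible in the log: a size threshold
// ("sumSize" vs "sizeToCheck") and a midkey guard (no split if the midkey equals
// the first or last row key, which would produce an empty daughter region).
public class SplitDecision {

    // Size check: split only when the summed store file size exceeds the threshold.
    static boolean bigEnough(long sumSizeBytes, long sizeToCheckBytes) {
        return sumSizeBytes > sizeToCheckBytes;
    }

    // Midkey guard: a split point equal to the first or last key is unusable.
    static boolean usableSplitPoint(byte[] midkey, byte[] firstKey, byte[] lastKey) {
        return !Arrays.equals(midkey, firstKey) && !Arrays.equals(midkey, lastKey);
    }

    public static void main(String[] args) {
        long sumSize = 25 * 1024 + 700;   // ~25.7 K, as logged
        long sizeToCheck = 16 * 1024;     // 16.0 K, as logged
        byte[] first = "row0001".getBytes();
        byte[] last = "row0015".getBytes();
        byte[] midkey = first;            // the degenerate case the log reports

        System.out.println("size check passes: " + bigEnough(sumSize, sizeToCheck));
        System.out.println("split point usable: " + usableSplitPoint(midkey, first, last));
    }
}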
2024-11-16T20:36:08,618 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:36:08,618 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Set 
watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:36:08,618 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:36:08,618 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39701-0x101455c0ce00002, quorum=127.0.0.1:50571, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:36:08,618 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:36:08,618 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '40c018648b21,46419,1731789328882' ***** 2024-11-16T20:36:08,618 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T20:36:08,618 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '40c018648b21,39701,1731789330052' ***** 2024-11-16T20:36:08,618 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T20:36:08,619 INFO [RS:0;40c018648b21:46419 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T20:36:08,619 INFO [RS:1;40c018648b21:39701 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T20:36:08,619 INFO [RS:0;40c018648b21:46419 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T20:36:08,619 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T20:36:08,619 INFO [RS:1;40c018648b21:39701 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T20:36:08,619 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T20:36:08,619 INFO [RS:1;40c018648b21:39701 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T20:36:08,619 INFO [RS:1;40c018648b21:39701 {}] regionserver.HRegionServer(959): stopping server 40c018648b21,39701,1731789330052 2024-11-16T20:36:08,619 INFO [RS:1;40c018648b21:39701 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T20:36:08,619 INFO [RS:1;40c018648b21:39701 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;40c018648b21:39701. 
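The ZooKeeper traffic above is the cluster shutdown signal: /hbase/running is deleted, every watcher fires with NodeDeleted, and each process immediately re-arms its watch on the now-missing znode (ZooKeeper watches are one-shot). The following is a sketch of that watch/re-arm pattern using the plain ZooKeeper client API; the znode path and quorum address are taken from the log, while the class name and session timeout are assumptions, and this is not the HBase ZKWatcher implementation.

import java.io.IOException;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Illustrative sketch: watch a "cluster up" znode and treat its deletion as the
// shutdown signal, re-arming the watch each time it fires.
public class RunningNodeWatcher implements Watcher {

    private static final String RUNNING = "/hbase/running";
    private final ZooKeeper zk;

    RunningNodeWatcher(String quorum) throws IOException {
        this.zk = new ZooKeeper(quorum, 30_000, this);
    }

    void arm() throws KeeperException, InterruptedException {
        // exists() sets the watch whether or not the znode is currently present,
        // matching the "Set watcher on znode that does not yet exist" debug lines.
        zk.exists(RUNNING, this);
    }

    @Override
    public void process(WatchedEvent event) {
        if (event.getType() == Watcher.Event.EventType.NodeDeleted && RUNNING.equals(event.getPath())) {
            System.out.println("cluster shutdown requested; begin stopping this server");
        }
        try {
            arm();   // re-register, since ZooKeeper watches fire only once
        } catch (Exception e) {
            // a real server would log and retry; ignored in this sketch
        }
    }

    public static void main(String[] args) throws Exception {
        RunningNodeWatcher watcher = new RunningNodeWatcher("127.0.0.1:50571");   // quorum from the log
        watcher.arm();
        Thread.sleep(Long.MAX_VALUE);   // keep the process alive to receive events
    }
}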
2024-11-16T20:36:08,619 DEBUG [RS:1;40c018648b21:39701 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:36:08,619 INFO [RS:0;40c018648b21:46419 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T20:36:08,619 DEBUG [RS:1;40c018648b21:39701 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:36:08,620 INFO [RS:0;40c018648b21:46419 {}] regionserver.HRegionServer(3091): Received CLOSE for cad6a923190659cbe955b214dd2ed352 2024-11-16T20:36:08,620 INFO [RS:1;40c018648b21:39701 {}] regionserver.HRegionServer(976): stopping server 40c018648b21,39701,1731789330052; all regions closed. 2024-11-16T20:36:08,620 INFO [RS:0;40c018648b21:46419 {}] regionserver.HRegionServer(959): stopping server 40c018648b21,46419,1731789328882 2024-11-16T20:36:08,620 INFO [RS:0;40c018648b21:46419 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T20:36:08,620 INFO [RS:0;40c018648b21:46419 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;40c018648b21:46419. 
2024-11-16T20:36:08,620 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,620 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing cad6a923190659cbe955b214dd2ed352, disabling compactions & flushes 2024-11-16T20:36:08,620 DEBUG [RS:0;40c018648b21:46419 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:36:08,620 DEBUG [RS:0;40c018648b21:46419 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:36:08,620 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. 2024-11-16T20:36:08,620 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,620 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. 2024-11-16T20:36:08,621 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,621 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. after waiting 0 ms 2024-11-16T20:36:08,621 INFO [RS:0;40c018648b21:46419 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T20:36:08,621 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. 2024-11-16T20:36:08,621 INFO [RS:0;40c018648b21:46419 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T20:36:08,621 INFO [RS:0;40c018648b21:46419 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-16T20:36:08,621 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing cad6a923190659cbe955b214dd2ed352 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-16T20:36:08,621 INFO [RS:0;40c018648b21:46419 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T20:36:08,621 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,621 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,622 INFO [RS:0;40c018648b21:46419 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T20:36:08,622 DEBUG [RS:0;40c018648b21:46419 {}] regionserver.HRegionServer(1325): Online Regions={cad6a923190659cbe955b214dd2ed352=TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352., 1588230740=hbase:meta,,1.1588230740} 2024-11-16T20:36:08,622 DEBUG [RS:0;40c018648b21:46419 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, cad6a923190659cbe955b214dd2ed352 2024-11-16T20:36:08,622 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T20:36:08,622 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:08,622 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T20:36:08,622 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T20:36:08,622 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T20:36:08,622 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:36:08,622 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T20:36:08,622 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 2024-11-16T20:36:08,623 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-16T20:36:08,623 WARN [IPC Server handler 4 on default port 33297 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 has not been closed. Lease recovery is in progress. RecoveryId = 1079 for block blk_1073741837_1013 2024-11-16T20:36:08,623 ERROR [FSHLog-0-hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65-prefix:40c018648b21,46419,1731789328882.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:08,623 WARN [FSHLog-0-hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65-prefix:40c018648b21,46419,1731789328882.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
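The RecoverLeaseFSUtils lines here and earlier in the log (attempt=0 after 1ms, then attempt=1 after 4002ms) show the lease-recovery pattern: trigger recovery once, then poll whether the NameNode has closed the file, pausing longer between checks while recovery (RecoveryId 1079 above) proceeds in the background. A standalone sketch of that poll-with-backoff loop follows; LeaseSource is a made-up interface standing in for the real Hadoop calls, not the actual RecoverLeaseFSUtils or DistributedFileSystem API.

// Illustrative sketch of the retry pattern behind the RecoverLeaseFSUtils log lines.
public class LeaseRecoveryRetry {

    interface LeaseSource {
        boolean recoverLease(String path);   // true if the file is already closed
        boolean isFileClosed(String path);
    }

    static boolean recoverWithRetries(LeaseSource fs, String path, int maxAttempts)
            throws InterruptedException {
        long start = System.currentTimeMillis();
        if (fs.recoverLease(path)) {
            return true;   // nothing to do
        }
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            if (fs.isFileClosed(path)) {
                return true;
            }
            long waited = System.currentTimeMillis() - start;
            System.out.println("Failed to recover lease, attempt=" + attempt
                + " on file=" + path + " after " + waited + "ms");
            // First re-check almost immediately, then back off to multi-second pauses.
            Thread.sleep(attempt == 0 ? 10 : 4_000);
        }
        return false;
    }

    public static void main(String[] args) throws InterruptedException {
        // Toy source that reports the file closed on the third check.
        LeaseSource fake = new LeaseSource() {
            int calls = 0;
            public boolean recoverLease(String path) { return false; }
            public boolean isFileClosed(String path) { return ++calls >= 3; }
        };
        System.out.println("recovered: "
            + recoverWithRetries(fake, "/user/jenkins/test-data/example.wal", 5));
    }
}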
2024-11-16T20:36:08,623 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 after 1ms 2024-11-16T20:36:08,624 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 40c018648b21%2C46419%2C1731789328882.meta:.meta(num 1731789329897) roll requested 2024-11-16T20:36:08,624 INFO [regionserver/40c018648b21:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C46419%2C1731789328882.meta.1731789368624.meta 2024-11-16T20:36:08,627 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/2f3eaa74515748a5a5cef68492a99b11 is 1080, key is row0015/info:/1731789368124/Put/seqid=0 2024-11-16T20:36:08,630 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,630 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,630 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,630 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,630 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,630 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789368624.meta 2024-11-16T20:36:08,631 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:08,631 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46125,DS-9da48b7c-cdd6-45f8-8c4d-921ff678f746,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:08,631 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta 2024-11-16T20:36:08,631 WARN [IPC Server handler 0 on default port 33297 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta has not been closed. Lease recovery is in progress. RecoveryId = 1082 for block blk_1073741834_1010 2024-11-16T20:36:08,632 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta after 1ms 2024-11-16T20:36:08,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741896_1081 (size=14660) 2024-11-16T20:36:08,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34121 is added to blk_1073741896_1081 (size=14660) 2024-11-16T20:36:08,633 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/2f3eaa74515748a5a5cef68492a99b11 2024-11-16T20:36:08,634 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40931:40931),(127.0.0.1/127.0.0.1:44333:44333)] 2024-11-16T20:36:08,634 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta is not closed yet, will try archiving it next time 2024-11-16T20:36:08,640 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/.tmp/info/2f3eaa74515748a5a5cef68492a99b11 as hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/2f3eaa74515748a5a5cef68492a99b11 2024-11-16T20:36:08,647 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/2f3eaa74515748a5a5cef68492a99b11, entries=9, sequenceid=79, 
filesize=14.3 K 2024-11-16T20:36:08,649 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10758, heapSize ~11.48 KB/11760, currentSize=0 B/0 for cad6a923190659cbe955b214dd2ed352 in 27ms, sequenceid=79, compaction requested=true 2024-11-16T20:36:08,649 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/0960d3fe8bc84b538bef29ad4248d92e, hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/70ce71f77c614a19adee97c5eb8436a9, hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/f5039cfa0e684093985bd480f0b7c9e1, hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/a1786a39b8684d48bf66d9c24d7c95cc, hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/d7a2097c1a204068882ae16482ed157d, hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/96d76f3d237c45e8a37117d4491ec0a3] to archive 2024-11-16T20:36:08,651 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
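The flush and close activity above rests on two simple file moves: a flushed HFile is written under the region's .tmp directory and then committed by moving it into the column-family directory, and compacted store files are archived by moving them from data/... to the same relative path under archive/.... Below is a local-filesystem sketch of both moves; HBase performs them on HDFS, and the directory layout and names here are simplified and illustrative only.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Illustrative sketch: commit a flushed file from .tmp into the store directory,
// then archive a compacted store file to the mirrored path under archive/.
public class StoreFileMoves {

    // .tmp/info/<file>  ->  info/<file>
    static Path commitFlushedFile(Path regionDir, String family, Path tmpFile) throws IOException {
        Path dest = regionDir.resolve(family).resolve(tmpFile.getFileName());
        Files.createDirectories(dest.getParent());
        return Files.move(tmpFile, dest, StandardCopyOption.ATOMIC_MOVE);
    }

    // data/<table>/<region>/<family>/<file>  ->  archive/<table>/<region>/<family>/<file>
    static Path archiveCompactedFile(Path dataRoot, Path archiveRoot, Path storeFile) throws IOException {
        Path relative = dataRoot.relativize(storeFile);
        Path dest = archiveRoot.resolve(relative);
        Files.createDirectories(dest.getParent());
        return Files.move(storeFile, dest, StandardCopyOption.REPLACE_EXISTING);
    }

    public static void main(String[] args) throws IOException {
        Path root = Files.createTempDirectory("storefile-moves");
        Path dataRoot = root.resolve("data");
        Path archiveRoot = root.resolve("archive");
        Path regionDir = dataRoot.resolve("default/TestTable/region1");

        // Simulate a flushed file sitting in .tmp, then commit it into the family directory.
        Path tmp = regionDir.resolve(".tmp/info/flushedfile");
        Files.createDirectories(tmp.getParent());
        Files.createFile(tmp);
        Path committed = commitFlushedFile(regionDir, "info", tmp);
        System.out.println("committed to " + committed);

        // Later, archive it as if it had been compacted away.
        System.out.println("archived to " + archiveCompactedFile(dataRoot, archiveRoot, committed));
    }
}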
2024-11-16T20:36:08,653 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740/.tmp/info/e9e9636f7c5047f4b1655d456d9fb9ba is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352./info:regioninfo/1731789330575/Put/seqid=0 2024-11-16T20:36:08,653 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/0960d3fe8bc84b538bef29ad4248d92e to hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/0960d3fe8bc84b538bef29ad4248d92e 2024-11-16T20:36:08,655 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/70ce71f77c614a19adee97c5eb8436a9 to hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/70ce71f77c614a19adee97c5eb8436a9 2024-11-16T20:36:08,654 WARN [Thread-1046 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:08,655 WARN [Thread-1046 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK], DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]) is bad. 
2024-11-16T20:36:08,655 WARN [Thread-1046 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741897_1083 2024-11-16T20:36:08,655 WARN [Thread-1046 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK] 2024-11-16T20:36:08,656 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/f5039cfa0e684093985bd480f0b7c9e1 to hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/f5039cfa0e684093985bd480f0b7c9e1 2024-11-16T20:36:08,657 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/a1786a39b8684d48bf66d9c24d7c95cc to hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/a1786a39b8684d48bf66d9c24d7c95cc 2024-11-16T20:36:08,659 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/d7a2097c1a204068882ae16482ed157d to hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/d7a2097c1a204068882ae16482ed157d 2024-11-16T20:36:08,661 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/96d76f3d237c45e8a37117d4491ec0a3 to hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/info/96d76f3d237c45e8a37117d4491ec0a3 2024-11-16T20:36:08,661 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=40c018648b21:37849 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-16T20:36:08,662 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [0960d3fe8bc84b538bef29ad4248d92e=10347, 70ce71f77c614a19adee97c5eb8436a9=12506, f5039cfa0e684093985bd480f0b7c9e1=17994, a1786a39b8684d48bf66d9c24d7c95cc=6027, d7a2097c1a204068882ae16482ed157d=6027, 96d76f3d237c45e8a37117d4491ec0a3=6027] 2024-11-16T20:36:08,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34121 is added to blk_1073741898_1084 (size=7089) 2024-11-16T20:36:08,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741898_1084 (size=7089) 2024-11-16T20:36:08,662 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740/.tmp/info/e9e9636f7c5047f4b1655d456d9fb9ba 2024-11-16T20:36:08,666 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/cad6a923190659cbe955b214dd2ed352/recovered.edits/82.seqid, newMaxSeqId=82, maxSeqId=1 2024-11-16T20:36:08,667 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. 2024-11-16T20:36:08,667 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for cad6a923190659cbe955b214dd2ed352: Waiting for close lock at 1731789368620Running coprocessor pre-close hooks at 1731789368620Disabling compacts and flushes for region at 1731789368620Disabling writes for close at 1731789368621 (+1 ms)Obtaining lock to block concurrent updates at 1731789368621Preparing flush snapshotting stores in cad6a923190659cbe955b214dd2ed352 at 1731789368621Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352., syncing WAL and waiting on mvcc, flushsize=dataSize=10758, getHeapSize=11760, getOffHeapSize=0, getCellsCount=10 at 1731789368621Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. 
at 1731789368622 (+1 ms)Flushing cad6a923190659cbe955b214dd2ed352/info: creating writer at 1731789368622Flushing cad6a923190659cbe955b214dd2ed352/info: appending metadata at 1731789368626 (+4 ms)Flushing cad6a923190659cbe955b214dd2ed352/info: closing flushed file at 1731789368626Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5686f91f: reopening flushed file at 1731789368639 (+13 ms)Finished flush of dataSize ~10.51 KB/10758, heapSize ~11.48 KB/11760, currentSize=0 B/0 for cad6a923190659cbe955b214dd2ed352 in 27ms, sequenceid=79, compaction requested=true at 1731789368649 (+10 ms)Writing region close event to WAL at 1731789368662 (+13 ms)Running coprocessor post-close hooks at 1731789368667 (+5 ms)Closed at 1731789368667 2024-11-16T20:36:08,667 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731789330196.cad6a923190659cbe955b214dd2ed352. 2024-11-16T20:36:08,689 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740/.tmp/ns/b9722aa62adc4a6dbaf1977e11ee0bf8 is 43, key is default/ns:d/1731789329980/Put/seqid=0 2024-11-16T20:36:08,691 WARN [Thread-1053 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:08,691 WARN [Thread-1053 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK], DatanodeInfoWithStorage[127.0.0.1:35281,DS-1631de22-0b9f-4017-baf0-41d8a91735fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]) is bad. 
2024-11-16T20:36:08,691 WARN [Thread-1053 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741899_1085 2024-11-16T20:36:08,692 WARN [Thread-1053 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK] 2024-11-16T20:36:08,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34121 is added to blk_1073741900_1086 (size=5153) 2024-11-16T20:36:08,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741900_1086 (size=5153) 2024-11-16T20:36:08,697 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740/.tmp/ns/b9722aa62adc4a6dbaf1977e11ee0bf8 2024-11-16T20:36:08,718 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740/.tmp/table/5dfa8f61ac85450db7fe02bc3f8634dd is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731789330586/Put/seqid=0 2024-11-16T20:36:08,722 WARN [Thread-1059 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34869 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:08,722 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:37298 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741901_1087] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data4]'}, localName='127.0.0.1:34121', datanodeUuid='f19a5063-ec0a-4807-a891-570d666b0316', xmitsInProgress=0}:Exception transferring block BP-1083962379-172.17.0.2-1731789326364:blk_1073741901_1087 to mirror 127.0.0.1:34869 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:36:08,722 WARN [Thread-1059 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1083962379-172.17.0.2-1731789326364:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34121,DS-66984a25-bb75-45b5-9d89-6a192cfe26fd,DISK], DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK]) is bad. 2024-11-16T20:36:08,722 WARN [Thread-1059 {}] hdfs.DataStreamer(1850): Abandoning BP-1083962379-172.17.0.2-1731789326364:blk_1073741901_1087 2024-11-16T20:36:08,722 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:37298 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741901_1087] {}] datanode.BlockReceiver(316): Block 1073741901 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T20:36:08,722 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1242280705_22 at /127.0.0.1:37298 [Receiving block BP-1083962379-172.17.0.2-1731789326364:blk_1073741901_1087] {}] datanode.DataXceiver(331): 127.0.0.1:34121:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37298 dst: /127.0.0.1:34121 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T20:36:08,723 WARN [Thread-1059 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34869,DS-61c766fc-84d8-4f41-b5fc-76f4226f4319,DISK] 2024-11-16T20:36:08,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741902_1088 (size=5424) 2024-11-16T20:36:08,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34121 is added to blk_1073741902_1088 (size=5424) 2024-11-16T20:36:08,728 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740/.tmp/table/5dfa8f61ac85450db7fe02bc3f8634dd 2024-11-16T20:36:08,735 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740/.tmp/info/e9e9636f7c5047f4b1655d456d9fb9ba as hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740/info/e9e9636f7c5047f4b1655d456d9fb9ba 2024-11-16T20:36:08,743 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740/info/e9e9636f7c5047f4b1655d456d9fb9ba, entries=10, sequenceid=11, filesize=6.9 K 2024-11-16T20:36:08,745 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740/.tmp/ns/b9722aa62adc4a6dbaf1977e11ee0bf8 as hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740/ns/b9722aa62adc4a6dbaf1977e11ee0bf8 2024-11-16T20:36:08,752 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740/ns/b9722aa62adc4a6dbaf1977e11ee0bf8, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T20:36:08,753 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740/.tmp/table/5dfa8f61ac85450db7fe02bc3f8634dd as hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740/table/5dfa8f61ac85450db7fe02bc3f8634dd 2024-11-16T20:36:08,760 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740/table/5dfa8f61ac85450db7fe02bc3f8634dd, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T20:36:08,761 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 139ms, sequenceid=11, 
compaction requested=false 2024-11-16T20:36:08,766 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T20:36:08,767 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T20:36:08,767 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T20:36:08,767 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731789368622Running coprocessor pre-close hooks at 1731789368622Disabling compacts and flushes for region at 1731789368622Disabling writes for close at 1731789368622Obtaining lock to block concurrent updates at 1731789368623 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731789368623Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731789368623Flushing stores of hbase:meta,,1.1588230740 at 1731789368635 (+12 ms)Flushing 1588230740/info: creating writer at 1731789368635Flushing 1588230740/info: appending metadata at 1731789368652 (+17 ms)Flushing 1588230740/info: closing flushed file at 1731789368653 (+1 ms)Flushing 1588230740/ns: creating writer at 1731789368669 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731789368688 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1731789368688Flushing 1588230740/table: creating writer at 1731789368703 (+15 ms)Flushing 1588230740/table: appending metadata at 1731789368718 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731789368718Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ed918c4: reopening flushed file at 1731789368734 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31181068: reopening flushed file at 1731789368744 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77e46a90: reopening flushed file at 1731789368752 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 139ms, sequenceid=11, compaction requested=false at 1731789368761 (+9 ms)Writing region close event to WAL at 1731789368762 (+1 ms)Running coprocessor post-close hooks at 1731789368767 (+5 ms)Closed at 1731789368767 2024-11-16T20:36:08,768 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T20:36:08,822 INFO [RS:0;40c018648b21:46419 {}] regionserver.HRegionServer(976): stopping server 40c018648b21,46419,1731789328882; all regions closed. 
2024-11-16T20:36:08,823 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,823 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,823 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,823 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,823 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:08,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741895_1080 (size=825) 2024-11-16T20:36:08,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34121 is added to blk_1073741895_1080 (size=825) 2024-11-16T20:36:09,196 INFO [regionserver/40c018648b21:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T20:36:09,196 INFO [regionserver/40c018648b21:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T20:36:09,372 INFO [regionserver/40c018648b21:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T20:36:09,372 INFO [regionserver/40c018648b21:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T20:36:09,374 INFO [regionserver/40c018648b21:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T20:36:10,154 INFO [regionserver/40c018648b21:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T20:36:10,201 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@622874f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34121, datanodeUuid=f19a5063-ec0a-4807-a891-570d666b0316, infoPort=40931, infoSecurePort=0, ipcPort=39417, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364):Failed to transfer BP-1083962379-172.17.0.2-1731789326364:blk_1073741832_1008 to 127.0.0.1:34869 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:36:10,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741836_1012 (size=76) 2024-11-16T20:36:10,253 INFO [master/40c018648b21:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T20:36:10,253 INFO [master/40c018648b21:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-16T20:36:11,201 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@622874f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34121, datanodeUuid=f19a5063-ec0a-4807-a891-570d666b0316, infoPort=40931, infoSecurePort=0, ipcPort=39417, storageInfo=lv=-57;cid=testClusterID;nsid=2134441233;c=1731789326364):Failed to transfer BP-1083962379-172.17.0.2-1731789326364:blk_1073741826_1002 to 127.0.0.1:34869 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:36:11,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741828_1004 (size=1189) 2024-11-16T20:36:12,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741829_1005 (size=34) 2024-11-16T20:36:12,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741827_1003 (size=196) 2024-11-16T20:36:12,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34121 is added to blk_1073741877_1060 (size=12911) 2024-11-16T20:36:12,624 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 after 4002ms 2024-11-16T20:36:12,633 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta after 4001ms 2024-11-16T20:36:13,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741825_1001 (size=7) 2024-11-16T20:36:13,622 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-16T20:36:13,625 DEBUG [RS:1;40c018648b21:39701 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/oldWALs 2024-11-16T20:36:13,625 INFO [RS:1;40c018648b21:39701 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 40c018648b21%2C39701%2C1731789330052:(num 1731789330297) 2024-11-16T20:36:13,625 DEBUG [RS:1;40c018648b21:39701 {}] ipc.AbstractRpcClient(514): Stopping rpc 
client 2024-11-16T20:36:13,625 INFO [RS:1;40c018648b21:39701 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T20:36:13,625 INFO [RS:1;40c018648b21:39701 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T20:36:13,626 INFO [RS:1;40c018648b21:39701 {}] hbase.ChoreService(370): Chore service for: regionserver/40c018648b21:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T20:36:13,626 INFO [RS:1;40c018648b21:39701 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T20:36:13,626 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T20:36:13,626 INFO [RS:1;40c018648b21:39701 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T20:36:13,626 INFO [RS:1;40c018648b21:39701 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T20:36:13,626 INFO [RS:1;40c018648b21:39701 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T20:36:13,626 INFO [RS:1;40c018648b21:39701 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39701 2024-11-16T20:36:13,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:13,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:13,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39701-0x101455c0ce00002, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/40c018648b21,39701,1731789330052 2024-11-16T20:36:13,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T20:36:13,691 INFO [RS:1;40c018648b21:39701 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T20:36:13,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:13,691 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [40c018648b21,39701,1731789330052] 2024-11-16T20:36:13,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:13,692 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:13,692 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:13,692 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:13,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:13,703 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:13,712 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/40c018648b21,39701,1731789330052 already deleted, retry=false 2024-11-16T20:36:13,712 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 40c018648b21,39701,1731789330052 expired; onlineServers=1 2024-11-16T20:36:13,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39701-0x101455c0ce00002, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:36:13,801 INFO [RS:1;40c018648b21:39701 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T20:36:13,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39701-0x101455c0ce00002, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:36:13,801 INFO [RS:1;40c018648b21:39701 {}] regionserver.HRegionServer(1031): Exiting; stopping=40c018648b21,39701,1731789330052; zookeeper connection closed. 2024-11-16T20:36:13,802 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@334d1f0a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@334d1f0a 2024-11-16T20:36:13,824 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-16T20:36:13,828 DEBUG [RS:0;40c018648b21:46419 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/oldWALs 2024-11-16T20:36:13,828 INFO [RS:0;40c018648b21:46419 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 40c018648b21%2C46419%2C1731789328882.meta:.meta(num 1731789368624) 2024-11-16T20:36:13,828 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:13,828 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:13,829 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:13,829 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:13,829 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:13,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741893_1077 (size=15850) 2024-11-16T20:36:13,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34121 is added to blk_1073741893_1077 (size=15850) 2024-11-16T20:36:13,835 DEBUG [RS:0;40c018648b21:46419 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to 
/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/oldWALs 2024-11-16T20:36:13,835 INFO [RS:0;40c018648b21:46419 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 40c018648b21%2C46419%2C1731789328882:(num 1731789368106) 2024-11-16T20:36:13,835 DEBUG [RS:0;40c018648b21:46419 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:36:13,835 INFO [RS:0;40c018648b21:46419 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T20:36:13,835 INFO [RS:0;40c018648b21:46419 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T20:36:13,835 INFO [RS:0;40c018648b21:46419 {}] hbase.ChoreService(370): Chore service for: regionserver/40c018648b21:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T20:36:13,835 INFO [RS:0;40c018648b21:46419 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T20:36:13,835 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T20:36:13,836 INFO [RS:0;40c018648b21:46419 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46419 2024-11-16T20:36:13,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/40c018648b21,46419,1731789328882 2024-11-16T20:36:13,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T20:36:13,848 INFO [RS:0;40c018648b21:46419 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T20:36:13,859 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [40c018648b21,46419,1731789328882] 2024-11-16T20:36:13,869 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/40c018648b21,46419,1731789328882 already deleted, retry=false 2024-11-16T20:36:13,869 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 40c018648b21,46419,1731789328882 expired; onlineServers=0 2024-11-16T20:36:13,869 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '40c018648b21,37849,1731789328708' ***** 2024-11-16T20:36:13,870 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T20:36:13,870 INFO [M:0;40c018648b21:37849 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T20:36:13,870 INFO [M:0;40c018648b21:37849 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T20:36:13,870 DEBUG [M:0;40c018648b21:37849 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T20:36:13,870 DEBUG [M:0;40c018648b21:37849 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T20:36:13,870 DEBUG [master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789329262 {}] cleaner.HFileCleaner(306): Exit Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789329262,5,FailOnTimeoutGroup] 2024-11-16T20:36:13,870 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to 
clean it next round. Exiting. 2024-11-16T20:36:13,870 DEBUG [master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789329261 {}] cleaner.HFileCleaner(306): Exit Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789329261,5,FailOnTimeoutGroup] 2024-11-16T20:36:13,871 INFO [M:0;40c018648b21:37849 {}] hbase.ChoreService(370): Chore service for: master/40c018648b21:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T20:36:13,871 INFO [M:0;40c018648b21:37849 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T20:36:13,871 DEBUG [M:0;40c018648b21:37849 {}] master.HMaster(1795): Stopping service threads 2024-11-16T20:36:13,871 INFO [M:0;40c018648b21:37849 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T20:36:13,871 INFO [M:0;40c018648b21:37849 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T20:36:13,871 INFO [M:0;40c018648b21:37849 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T20:36:13,872 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T20:36:13,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T20:36:13,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:13,880 DEBUG [M:0;40c018648b21:37849 {}] zookeeper.ZKUtil(347): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T20:36:13,880 WARN [M:0;40c018648b21:37849 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T20:36:13,881 INFO [M:0;40c018648b21:37849 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/.lastflushedseqids 2024-11-16T20:36:13,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34121 is added to blk_1073741903_1089 (size=130) 2024-11-16T20:36:13,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741903_1089 (size=130) 2024-11-16T20:36:13,889 INFO [M:0;40c018648b21:37849 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T20:36:13,889 INFO [M:0;40c018648b21:37849 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T20:36:13,889 DEBUG [M:0;40c018648b21:37849 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T20:36:13,889 INFO [M:0;40c018648b21:37849 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T20:36:13,890 DEBUG [M:0;40c018648b21:37849 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:36:13,890 DEBUG [M:0;40c018648b21:37849 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T20:36:13,890 DEBUG [M:0;40c018648b21:37849 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:36:13,890 INFO [M:0;40c018648b21:37849 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-16T20:36:13,913 DEBUG [M:0;40c018648b21:37849 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/861a874036324418a49c8ea63b6897f9 is 82, key is hbase:meta,,1/info:regioninfo/1731789329931/Put/seqid=0 2024-11-16T20:36:13,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34121 is added to blk_1073741904_1090 (size=5672) 2024-11-16T20:36:13,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741904_1090 (size=5672) 2024-11-16T20:36:13,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:36:13,959 INFO [RS:0;40c018648b21:46419 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T20:36:13,959 INFO [RS:0;40c018648b21:46419 {}] regionserver.HRegionServer(1031): Exiting; stopping=40c018648b21,46419,1731789328882; zookeeper connection closed. 2024-11-16T20:36:13,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46419-0x101455c0ce00001, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:36:13,959 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@14c4ee89 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@14c4ee89 2024-11-16T20:36:13,960 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-16T20:36:14,205 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T20:36:14,225 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:14,225 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:14,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:14,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:14,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:14,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:14,233 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:14,235 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:14,322 INFO [M:0;40c018648b21:37849 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/861a874036324418a49c8ea63b6897f9 2024-11-16T20:36:14,352 DEBUG [M:0;40c018648b21:37849 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cab806ed561b44b795e2e83f00f99ec2 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731789330591/Put/seqid=0 2024-11-16T20:36:14,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34121 is added to blk_1073741905_1091 (size=6255) 2024-11-16T20:36:14,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741905_1091 (size=6255) 2024-11-16T20:36:14,358 INFO [M:0;40c018648b21:37849 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cab806ed561b44b795e2e83f00f99ec2 2024-11-16T20:36:14,363 INFO [M:0;40c018648b21:37849 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cab806ed561b44b795e2e83f00f99ec2 2024-11-16T20:36:14,379 DEBUG [M:0;40c018648b21:37849 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e7535549731f4519bb49e1d858f50af5 is 69, key is 40c018648b21,39701,1731789330052/rs:state/1731789330126/Put/seqid=0 2024-11-16T20:36:14,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741906_1092 (size=5224) 2024-11-16T20:36:14,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34121 is added to blk_1073741906_1092 (size=5224) 2024-11-16T20:36:14,384 INFO [M:0;40c018648b21:37849 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), 
to=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e7535549731f4519bb49e1d858f50af5 2024-11-16T20:36:14,407 DEBUG [M:0;40c018648b21:37849 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e29da4dbfae34ba3988cf308d9af1c72 is 52, key is load_balancer_on/state:d/1731789330036/Put/seqid=0 2024-11-16T20:36:14,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34121 is added to blk_1073741907_1093 (size=5056) 2024-11-16T20:36:14,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741907_1093 (size=5056) 2024-11-16T20:36:14,412 INFO [M:0;40c018648b21:37849 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e29da4dbfae34ba3988cf308d9af1c72 2024-11-16T20:36:14,418 DEBUG [M:0;40c018648b21:37849 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/861a874036324418a49c8ea63b6897f9 as hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/861a874036324418a49c8ea63b6897f9 2024-11-16T20:36:14,424 INFO [M:0;40c018648b21:37849 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/861a874036324418a49c8ea63b6897f9, entries=8, sequenceid=60, filesize=5.5 K 2024-11-16T20:36:14,425 DEBUG [M:0;40c018648b21:37849 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cab806ed561b44b795e2e83f00f99ec2 as hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cab806ed561b44b795e2e83f00f99ec2 2024-11-16T20:36:14,432 INFO [M:0;40c018648b21:37849 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cab806ed561b44b795e2e83f00f99ec2 2024-11-16T20:36:14,432 INFO [M:0;40c018648b21:37849 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cab806ed561b44b795e2e83f00f99ec2, entries=6, sequenceid=60, filesize=6.1 K 2024-11-16T20:36:14,433 DEBUG [M:0;40c018648b21:37849 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e7535549731f4519bb49e1d858f50af5 as hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e7535549731f4519bb49e1d858f50af5 
2024-11-16T20:36:14,438 INFO [M:0;40c018648b21:37849 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e7535549731f4519bb49e1d858f50af5, entries=2, sequenceid=60, filesize=5.1 K 2024-11-16T20:36:14,439 DEBUG [M:0;40c018648b21:37849 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e29da4dbfae34ba3988cf308d9af1c72 as hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e29da4dbfae34ba3988cf308d9af1c72 2024-11-16T20:36:14,444 INFO [M:0;40c018648b21:37849 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e29da4dbfae34ba3988cf308d9af1c72, entries=1, sequenceid=60, filesize=4.9 K 2024-11-16T20:36:14,445 INFO [M:0;40c018648b21:37849 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 555ms, sequenceid=60, compaction requested=false 2024-11-16T20:36:14,447 INFO [M:0;40c018648b21:37849 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:36:14,447 DEBUG [M:0;40c018648b21:37849 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731789373889Disabling compacts and flushes for region at 1731789373889Disabling writes for close at 1731789373890 (+1 ms)Obtaining lock to block concurrent updates at 1731789373890Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731789373890Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731789373890Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731789373891 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731789373891Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731789373912 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731789373912Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731789374332 (+420 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731789374351 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731789374351Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731789374363 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731789374378 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731789374378Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731789374390 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731789374406 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731789374406Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7304d2a4: reopening flushed file at 1731789374417 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@26b90822: reopening flushed file at 1731789374424 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f4d2c86: reopening flushed file at 1731789374432 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a743113: reopening flushed file at 1731789374439 (+7 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 555ms, sequenceid=60, compaction requested=false at 1731789374445 (+6 ms)Writing region close event to WAL at 1731789374447 (+2 ms)Closed at 1731789374447 2024-11-16T20:36:14,447 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:14,447 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:14,448 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:14,448 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:14,448 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:14,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35281 is added to blk_1073741891_1074 (size=1045) 2024-11-16T20:36:14,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34121 is added to blk_1073741891_1074 (size=1045) 2024-11-16T20:36:14,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:14,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:15,143 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T20:36:15,144 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T20:36:15,144 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T20:36:15,144 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T20:36:15,221 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@73247985 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1083962379-172.17.0.2-1731789326364:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:46125,null,null]) java.net.ConnectException: Call From 40c018648b21/172.17.0.2 to localhost:44965 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-16T20:36:15,286 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/WALs/40c018648b21,37849,1731789328708/40c018648b21%2C37849%2C1731789328708.1731789329038 to hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/oldWALs/40c018648b21%2C37849%2C1731789328708.1731789329038 2024-11-16T20:36:15,291 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/MasterData/oldWALs/40c018648b21%2C37849%2C1731789328708.1731789329038 to hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/oldWALs/40c018648b21%2C37849%2C1731789328708.1731789329038$masterlocalwal$ 2024-11-16T20:36:15,291 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T20:36:15,291 INFO [M:0;40c018648b21:37849 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-16T20:36:15,292 INFO [M:0;40c018648b21:37849 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37849 2024-11-16T20:36:15,292 INFO [M:0;40c018648b21:37849 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T20:36:15,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:36:15,443 INFO [M:0;40c018648b21:37849 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T20:36:15,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37849-0x101455c0ce00000, quorum=127.0.0.1:50571, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:36:15,447 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14b00457{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:36:15,447 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22c6c03b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:36:15,447 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:36:15,448 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@381443d3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:36:15,448 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@441dcfc4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.log.dir/,STOPPED} 2024-11-16T20:36:15,453 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@be2234b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1083962379-172.17.0.2-1731789326364:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:46125,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:44965 , LocalHost:localPort 40c018648b21/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-16T20:36:15,454 WARN [BP-1083962379-172.17.0.2-1731789326364 heartbeating to localhost/127.0.0.1:33297 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1083962379-172.17.0.2-1731789326364 (Datanode Uuid f19a5063-ec0a-4807-a891-570d666b0316) service to localhost/127.0.0.1:33297 2024-11-16T20:36:15,454 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data3/current/BP-1083962379-172.17.0.2-1731789326364 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:36:15,454 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@be2234b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1083962379-172.17.0.2-1731789326364:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:34121,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1083962379-172.17.0.2-1731789326364 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:36:15,455 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data4/current/BP-1083962379-172.17.0.2-1731789326364 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:36:15,455 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@be2234b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1083962379-172.17.0.2-1731789326364:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:46125,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1083962379-172.17.0.2-1731789326364 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T20:36:15,455 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:36:15,455 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@be2234b {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1083962379-172.17.0.2-1731789326364:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:34121,null,null], DatanodeInfoWithStorage[127.0.0.1:46125,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1083962379-172.17.0.2-1731789326364:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:34121,null,null], DatanodeInfoWithStorage[127.0.0.1:46125,null,null]] 2024-11-16T20:36:15,459 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@57d6f5a1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:36:15,459 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6a2bfd90{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:36:15,459 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:36:15,459 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f85c2b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:36:15,459 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23038dc2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.log.dir/,STOPPED} 2024-11-16T20:36:15,461 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T20:36:15,461 WARN [BP-1083962379-172.17.0.2-1731789326364 heartbeating to localhost/127.0.0.1:33297 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:36:15,461 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:36:15,461 WARN [BP-1083962379-172.17.0.2-1731789326364 heartbeating to localhost/127.0.0.1:33297 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1083962379-172.17.0.2-1731789326364 (Datanode Uuid caa29ef5-220e-47df-9a82-70d13952f572) service to localhost/127.0.0.1:33297 2024-11-16T20:36:15,461 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data7/current/BP-1083962379-172.17.0.2-1731789326364 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:36:15,462 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/cluster_1dc8cece-ac3d-b8bc-ccd8-8c4a0146ef64/data/data8/current/BP-1083962379-172.17.0.2-1731789326364 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:36:15,462 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:36:15,467 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2606b08f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T20:36:15,467 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@c053989{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:36:15,467 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:36:15,467 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3150e6db{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:36:15,467 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1aa07d80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.log.dir/,STOPPED} 2024-11-16T20:36:15,475 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T20:36:15,512 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T20:36:15,521 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=158 (was 82) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f5164bf4000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33297 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:33297 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46027 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33297 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:33297 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:33297 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33297 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:46027 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33297 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33297 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f5164bf4000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:33297 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:33297 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:33297 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=225 (was 200) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4169 (was 5040) 2024-11-16T20:36:15,529 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=158, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=225, ProcessCount=11, AvailableMemoryMB=4168 2024-11-16T20:36:15,529 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T20:36:15,529 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.log.dir so I do NOT create it in target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb 2024-11-16T20:36:15,529 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64da23ca-2820-0825-2f43-3ba7ac80db0b/hadoop.tmp.dir so I do NOT create it in target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb 2024-11-16T20:36:15,529 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89, deleteOnExit=true 2024-11-16T20:36:15,530 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T20:36:15,530 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/test.cache.data in system properties and HBase conf 2024-11-16T20:36:15,530 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T20:36:15,530 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.log.dir in system properties and HBase conf 2024-11-16T20:36:15,530 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T20:36:15,530 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T20:36:15,530 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T20:36:15,530 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-16T20:36:15,530 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T20:36:15,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T20:36:15,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T20:36:15,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T20:36:15,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T20:36:15,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T20:36:15,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T20:36:15,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T20:36:15,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T20:36:15,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/nfs.dump.dir in system properties and HBase conf 2024-11-16T20:36:15,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/java.io.tmpdir in system properties and HBase conf 2024-11-16T20:36:15,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T20:36:15,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T20:36:15,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T20:36:15,544 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T20:36:15,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:15,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:15,890 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:36:15,895 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:36:15,896 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:36:15,896 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:36:15,896 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:36:15,897 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:36:15,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7096145a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:36:15,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10c583a0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:36:16,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@232fa1ae{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/java.io.tmpdir/jetty-localhost-42109-hadoop-hdfs-3_4_1-tests_jar-_-any-15260191440024019307/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T20:36:16,010 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5c07fc0{HTTP/1.1, (http/1.1)}{localhost:42109} 2024-11-16T20:36:16,010 INFO [Time-limited test {}] server.Server(415): Started @157842ms 2024-11-16T20:36:16,024 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T20:36:16,286 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:36:16,290 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:36:16,290 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:36:16,290 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:36:16,290 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:36:16,294 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13646a74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:36:16,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@301612e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:36:16,402 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@44e11af1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/java.io.tmpdir/jetty-localhost-45833-hadoop-hdfs-3_4_1-tests_jar-_-any-9253381389777145868/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:36:16,402 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@60043bb8{HTTP/1.1, (http/1.1)}{localhost:45833} 2024-11-16T20:36:16,403 INFO [Time-limited test {}] server.Server(415): Started @158235ms 2024-11-16T20:36:16,404 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:36:16,434 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:36:16,438 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:36:16,438 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:36:16,439 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:36:16,439 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T20:36:16,439 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47946b20{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:36:16,440 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23accf28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:36:16,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77c83f30{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/java.io.tmpdir/jetty-localhost-35721-hadoop-hdfs-3_4_1-tests_jar-_-any-237067174070925758/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:36:16,550 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@54dbaae8{HTTP/1.1, (http/1.1)}{localhost:35721} 2024-11-16T20:36:16,550 INFO [Time-limited test {}] server.Server(415): Started @158382ms 2024-11-16T20:36:16,551 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:36:16,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:16,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:36:17,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:17,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:18,177 WARN [Thread-1199 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/data/data1/current/BP-1928493638-172.17.0.2-1731789375556/current, will proceed with Du for space computation calculation, 2024-11-16T20:36:18,178 WARN [Thread-1200 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/data/data2/current/BP-1928493638-172.17.0.2-1731789375556/current, will proceed with Du for space computation calculation, 2024-11-16T20:36:18,197 WARN [Thread-1163 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T20:36:18,199 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf331e2cfdb7abb99 with lease ID 0x821d7c8d982e6715: Processing first storage report for DS-88cf6506-870c-44f5-8caf-ba15ba12dde8 from datanode DatanodeRegistration(127.0.0.1:36627, datanodeUuid=0cca2dae-91fe-4aff-bf61-df0664cf3d5f, infoPort=37537, infoSecurePort=0, ipcPort=32997, storageInfo=lv=-57;cid=testClusterID;nsid=777324421;c=1731789375556) 2024-11-16T20:36:18,199 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf331e2cfdb7abb99 with lease ID 0x821d7c8d982e6715: from storage DS-88cf6506-870c-44f5-8caf-ba15ba12dde8 node DatanodeRegistration(127.0.0.1:36627, datanodeUuid=0cca2dae-91fe-4aff-bf61-df0664cf3d5f, infoPort=37537, infoSecurePort=0, ipcPort=32997, storageInfo=lv=-57;cid=testClusterID;nsid=777324421;c=1731789375556), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:36:18,200 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf331e2cfdb7abb99 with lease ID 0x821d7c8d982e6715: Processing first storage report for DS-ccf448de-e303-4fc2-8ab2-81785fd6f6ce from datanode DatanodeRegistration(127.0.0.1:36627, datanodeUuid=0cca2dae-91fe-4aff-bf61-df0664cf3d5f, infoPort=37537, infoSecurePort=0, ipcPort=32997, storageInfo=lv=-57;cid=testClusterID;nsid=777324421;c=1731789375556) 2024-11-16T20:36:18,200 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf331e2cfdb7abb99 with lease ID 0x821d7c8d982e6715: from storage DS-ccf448de-e303-4fc2-8ab2-81785fd6f6ce node DatanodeRegistration(127.0.0.1:36627, datanodeUuid=0cca2dae-91fe-4aff-bf61-df0664cf3d5f, infoPort=37537, infoSecurePort=0, ipcPort=32997, storageInfo=lv=-57;cid=testClusterID;nsid=777324421;c=1731789375556), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:36:18,304 WARN [Thread-1210 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/data/data3/current/BP-1928493638-172.17.0.2-1731789375556/current, will proceed with Du for space computation calculation, 2024-11-16T20:36:18,304 WARN [Thread-1211 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/data/data4/current/BP-1928493638-172.17.0.2-1731789375556/current, will proceed with Du for space computation calculation, 2024-11-16T20:36:18,326 WARN [Thread-1186 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T20:36:18,328 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xde7a4cd05cd06ca3 with lease ID 0x821d7c8d982e6716: Processing first storage report for DS-1dcaeddf-649b-40fc-a1fe-2e2365f836c6 from datanode DatanodeRegistration(127.0.0.1:39397, datanodeUuid=946683c1-2007-470d-9f92-4184b2d8bc02, infoPort=39659, infoSecurePort=0, ipcPort=41301, storageInfo=lv=-57;cid=testClusterID;nsid=777324421;c=1731789375556) 2024-11-16T20:36:18,328 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xde7a4cd05cd06ca3 with lease ID 0x821d7c8d982e6716: from storage DS-1dcaeddf-649b-40fc-a1fe-2e2365f836c6 node DatanodeRegistration(127.0.0.1:39397, datanodeUuid=946683c1-2007-470d-9f92-4184b2d8bc02, infoPort=39659, infoSecurePort=0, ipcPort=41301, storageInfo=lv=-57;cid=testClusterID;nsid=777324421;c=1731789375556), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:36:18,328 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xde7a4cd05cd06ca3 with lease ID 0x821d7c8d982e6716: Processing first storage report for DS-1f3c77fe-5c4b-4065-8150-9bbcc51eef86 from datanode DatanodeRegistration(127.0.0.1:39397, datanodeUuid=946683c1-2007-470d-9f92-4184b2d8bc02, infoPort=39659, infoSecurePort=0, ipcPort=41301, storageInfo=lv=-57;cid=testClusterID;nsid=777324421;c=1731789375556) 2024-11-16T20:36:18,328 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xde7a4cd05cd06ca3 with lease ID 0x821d7c8d982e6716: from storage DS-1f3c77fe-5c4b-4065-8150-9bbcc51eef86 node DatanodeRegistration(127.0.0.1:39397, datanodeUuid=946683c1-2007-470d-9f92-4184b2d8bc02, infoPort=39659, infoSecurePort=0, ipcPort=41301, storageInfo=lv=-57;cid=testClusterID;nsid=777324421;c=1731789375556), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T20:36:18,392 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb 2024-11-16T20:36:18,395 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/zookeeper_0, clientPort=63539, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T20:36:18,396 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63539 2024-11-16T20:36:18,396 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:36:18,397 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:36:18,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36627 is added to blk_1073741825_1001 (size=7) 2024-11-16T20:36:18,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39397 is added to blk_1073741825_1001 (size=7) 2024-11-16T20:36:18,411 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1 with version=8 2024-11-16T20:36:18,411 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/hbase-staging 2024-11-16T20:36:18,413 INFO [Time-limited test {}] client.ConnectionUtils(128): master/40c018648b21:0 server-side Connection retries=45 2024-11-16T20:36:18,414 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:36:18,414 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T20:36:18,414 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T20:36:18,414 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:36:18,414 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T20:36:18,414 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T20:36:18,414 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T20:36:18,415 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42331 2024-11-16T20:36:18,417 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42331 connecting to ZooKeeper ensemble=127.0.0.1:63539 2024-11-16T20:36:18,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:423310x0, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T20:36:18,472 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42331-0x101455ccf050000 connected 2024-11-16T20:36:18,554 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:36:18,555 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:36:18,557 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:36:18,558 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1, hbase.cluster.distributed=false 2024-11-16T20:36:18,559 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T20:36:18,560 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42331 2024-11-16T20:36:18,560 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42331 2024-11-16T20:36:18,560 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42331 2024-11-16T20:36:18,564 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42331 2024-11-16T20:36:18,564 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42331 2024-11-16T20:36:18,581 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/40c018648b21:0 server-side Connection retries=45 2024-11-16T20:36:18,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:36:18,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T20:36:18,581 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T20:36:18,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:36:18,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T20:36:18,582 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T20:36:18,582 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T20:36:18,582 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36051 2024-11-16T20:36:18,585 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36051 connecting to ZooKeeper ensemble=127.0.0.1:63539 2024-11-16T20:36:18,586 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:36:18,588 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:36:18,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:360510x0, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T20:36:18,601 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36051-0x101455ccf050001 connected 2024-11-16T20:36:18,601 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:36:18,602 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T20:36:18,603 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T20:36:18,604 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T20:36:18,605 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T20:36:18,605 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36051 2024-11-16T20:36:18,605 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36051 2024-11-16T20:36:18,606 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36051 2024-11-16T20:36:18,606 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36051 2024-11-16T20:36:18,606 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36051 2024-11-16T20:36:18,619 DEBUG [M:0;40c018648b21:42331 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;40c018648b21:42331 2024-11-16T20:36:18,619 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/40c018648b21,42331,1731789378413 2024-11-16T20:36:18,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:36:18,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:36:18,633 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/40c018648b21,42331,1731789378413 2024-11-16T20:36:18,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:18,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:18,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T20:36:18,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:18,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:18,643 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T20:36:18,644 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/40c018648b21,42331,1731789378413 from backup master directory 2024-11-16T20:36:18,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:36:18,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/40c018648b21,42331,1731789378413 2024-11-16T20:36:18,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:36:18,654 WARN [master/40c018648b21:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T20:36:18,654 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=40c018648b21,42331,1731789378413 2024-11-16T20:36:18,660 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/hbase.id] with ID: 9affd050-c51f-4bd4-ba87-a4ee4a54451d 2024-11-16T20:36:18,660 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/.tmp/hbase.id 2024-11-16T20:36:18,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39397 is added to blk_1073741826_1002 (size=42) 2024-11-16T20:36:18,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36627 is added to blk_1073741826_1002 (size=42) 2024-11-16T20:36:18,668 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/.tmp/hbase.id]:[hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/hbase.id] 2024-11-16T20:36:18,679 INFO [master/40c018648b21:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:36:18,680 INFO [master/40c018648b21:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T20:36:18,681 INFO [master/40c018648b21:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
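Editor's note: the two WARN entries above are RecoverLeaseFSUtils trying to recover the lease on old WAL files. It reflectively calls DistributedFileSystem.isFileClosed() to ask whether the NameNode already considers the file closed, and the call fails here with "Filesystem closed" because the DFS client behind those paths had already been shut down. A minimal sketch of the underlying HDFS calls, with a placeholder path and none of the reflection or timeout handling the real utility adds:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder WAL path; the real targets are the WAL files named in the warnings above.
        Path wal = new Path("hdfs://localhost:8020/example/WALs/example-wal");
        DistributedFileSystem dfs = (DistributedFileSystem) wal.getFileSystem(conf);
        boolean recovered = dfs.recoverLease(wal); // ask the NameNode to begin lease recovery
        while (!recovered) {
          // The same call the utility invokes via reflection; it throws
          // IOException("Filesystem closed") when the DFSClient is already shut down,
          // which is exactly the failure recorded in the log.
          if (dfs.isFileClosed(wal)) {
            break;
          }
          Thread.sleep(1000);
          recovered = dfs.recoverLease(wal);
        }
      }
    }

The real utility wraps these calls in reflection plus retry and timeout logic, polling until the NameNode reports the file closed or the wait gives up.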
2024-11-16T20:36:18,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:18,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:18,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36627 is added to blk_1073741827_1003 (size=196) 2024-11-16T20:36:18,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39397 is added to blk_1073741827_1003 (size=196) 2024-11-16T20:36:18,701 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T20:36:18,702 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T20:36:18,703 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:36:18,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39397 is added to blk_1073741828_1004 (size=1189) 2024-11-16T20:36:18,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36627 is added to blk_1073741828_1004 (size=1189) 2024-11-16T20:36:18,719 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store 2024-11-16T20:36:18,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39397 is added to blk_1073741829_1005 (size=34) 2024-11-16T20:36:18,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36627 is added to blk_1073741829_1005 (size=34) 2024-11-16T20:36:18,728 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:36:18,729 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T20:36:18,729 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:36:18,729 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:36:18,729 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T20:36:18,729 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:36:18,729 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
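Editor's note: the master:store table descriptor dumped above maps directly onto HBase's public descriptor builders. A minimal sketch reproducing just the 'info' family from the log (the proc, rs and state families follow the same pattern); building it by hand like this is purely illustrative, since the master creates this local region itself:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
                .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
                .build())
            .build();
        System.out.println(td);
      }
    }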
2024-11-16T20:36:18,729 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731789378729Disabling compacts and flushes for region at 1731789378729Disabling writes for close at 1731789378729Writing region close event to WAL at 1731789378729Closed at 1731789378729 2024-11-16T20:36:18,730 WARN [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/.initializing 2024-11-16T20:36:18,730 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/WALs/40c018648b21,42331,1731789378413 2024-11-16T20:36:18,732 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C42331%2C1731789378413, suffix=, logDir=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/WALs/40c018648b21,42331,1731789378413, archiveDir=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/oldWALs, maxLogs=10 2024-11-16T20:36:18,733 INFO [master/40c018648b21:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C42331%2C1731789378413.1731789378733 2024-11-16T20:36:18,738 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/WALs/40c018648b21,42331,1731789378413/40c018648b21%2C42331%2C1731789378413.1731789378733 2024-11-16T20:36:18,738 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39659:39659),(127.0.0.1/127.0.0.1:37537:37537)] 2024-11-16T20:36:18,739 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T20:36:18,739 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:36:18,739 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:18,739 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:18,741 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:18,742 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T20:36:18,742 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:18,742 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:18,743 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:18,744 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T20:36:18,744 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:18,744 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:36:18,744 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:18,746 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T20:36:18,746 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:18,746 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:36:18,746 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:18,748 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T20:36:18,748 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:18,748 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:36:18,748 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:18,749 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:18,749 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:18,751 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:18,751 DEBUG [master/40c018648b21:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:18,751 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T20:36:18,752 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:18,755 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:36:18,755 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=775492, jitterRate=-0.013911217451095581}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T20:36:18,756 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731789378739Initializing all the Stores at 1731789378740 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789378740Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789378740Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789378741 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789378741Cleaning up temporary data from old regions at 1731789378751 (+10 ms)Region opened successfully at 1731789378756 (+5 ms) 2024-11-16T20:36:18,756 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T20:36:18,760 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e88c643, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=40c018648b21/172.17.0.2:0 2024-11-16T20:36:18,760 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T20:36:18,761 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T20:36:18,761 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T20:36:18,761 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T20:36:18,761 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T20:36:18,762 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T20:36:18,762 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T20:36:18,764 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T20:36:18,765 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T20:36:18,774 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T20:36:18,775 INFO [master/40c018648b21:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T20:36:18,775 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T20:36:18,788 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T20:36:18,788 INFO [master/40c018648b21:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T20:36:18,790 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T20:36:18,800 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T20:36:18,802 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T20:36:18,811 DEBUG 
[master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T20:36:18,814 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T20:36:18,821 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T20:36:18,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T20:36:18,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T20:36:18,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:18,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:18,833 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=40c018648b21,42331,1731789378413, sessionid=0x101455ccf050000, setting cluster-up flag (Was=false) 2024-11-16T20:36:18,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:18,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:18,885 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T20:36:18,886 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=40c018648b21,42331,1731789378413 2024-11-16T20:36:18,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:18,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:18,937 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T20:36:18,938 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=40c018648b21,42331,1731789378413 2024-11-16T20:36:18,940 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T20:36:18,942 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T20:36:18,942 INFO [master/40c018648b21:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T20:36:18,942 INFO [master/40c018648b21:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T20:36:18,942 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 40c018648b21,42331,1731789378413 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T20:36:18,944 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:36:18,944 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:36:18,944 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:36:18,944 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:36:18,944 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/40c018648b21:0, corePoolSize=10, maxPoolSize=10 2024-11-16T20:36:18,944 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:18,944 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/40c018648b21:0, corePoolSize=2, maxPoolSize=2 2024-11-16T20:36:18,944 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/40c018648b21:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T20:36:18,945 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731789408945 2024-11-16T20:36:18,945 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T20:36:18,945 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T20:36:18,946 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T20:36:18,946 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T20:36:18,946 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T20:36:18,946 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T20:36:18,946 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:18,946 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:36:18,946 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T20:36:18,946 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T20:36:18,946 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T20:36:18,947 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T20:36:18,947 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T20:36:18,947 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T20:36:18,947 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789378947,5,FailOnTimeoutGroup] 2024-11-16T20:36:18,947 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:18,947 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789378947,5,FailOnTimeoutGroup] 2024-11-16T20:36:18,947 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
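Editor's note: the "Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled" and HFileCleaner entries above are periodic cleanup tasks scheduled on the master's ChoreService. A minimal sketch of defining and scheduling one such chore, assuming the public ScheduledChore/ChoreService/Stoppable API; the name and period here are arbitrary examples, not the master's actual cleaners:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // The period is interpreted in the chore's time unit, reported above as MILLISECONDS.
        ScheduledChore chore = new ScheduledChore("example-cleaner", stopper, 600_000) {
          @Override protected void chore() {
            // periodic cleanup work; the real log/hfile cleaners delegate to configured plugin classes
          }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(chore);
        Thread.sleep(1_000);
        service.shutdown();
      }
    }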
2024-11-16T20:36:18,948 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T20:36:18,948 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:18,947 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T20:36:18,948 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
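Editor's note: the HMaster(1741) line above states that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is set to a value greater than 0. A minimal sketch of supplying that key programmatically; the threshold of 3 is an arbitrary example, and in a real deployment the key would normally be set in hbase-site.xml:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RefCountThresholdSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Key copied verbatim from the log entry above; per that message,
        // any value <= 0 leaves the recovery-by-reopen feature disabled.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
      }
    }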
2024-11-16T20:36:18,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39397 is added to blk_1073741831_1007 (size=1321) 2024-11-16T20:36:18,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36627 is added to blk_1073741831_1007 (size=1321) 2024-11-16T20:36:18,958 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T20:36:18,959 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1 2024-11-16T20:36:18,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39397 is added to blk_1073741832_1008 (size=32) 2024-11-16T20:36:18,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36627 is added to blk_1073741832_1008 (size=32) 2024-11-16T20:36:18,970 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:36:18,972 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T20:36:18,974 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T20:36:18,974 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:18,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:18,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T20:36:18,976 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T20:36:18,976 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:18,977 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:18,977 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T20:36:18,978 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T20:36:18,978 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:18,978 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:18,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T20:36:18,980 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T20:36:18,980 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:18,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:18,980 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T20:36:18,981 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740 2024-11-16T20:36:18,981 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740 2024-11-16T20:36:18,982 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T20:36:18,982 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T20:36:18,983 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
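Editor's note: the FlushLargeStoresPolicy messages in this section (32.0 M for master:store earlier, 16.0 M for hbase:meta here) both use the same fallback when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset: the region's memstore flush heap size divided by its number of column families. A small worked check of that arithmetic against the values printed in this log:

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        // master:store - flushSize=134217728 (128 MB) and four families (info, proc, rs, state)
        System.out.println(134_217_728L / 4); // 33554432 = 32 MB, the flushSizeLowerBound logged for master:store
        // hbase:meta also has four families (info, ns, rep_barrier, table); its logged lower bound
        // of 16777216 (16 MB) is consistent with a 64 MB flush heap size for the meta region
        System.out.println(4 * 16_777_216L);  // 67108864 = 64 MB
      }
    }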
2024-11-16T20:36:18,984 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T20:36:18,986 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:36:18,987 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=791105, jitterRate=0.005942508578300476}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T20:36:18,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731789378970Initializing all the Stores at 1731789378971 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789378971Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789378972 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789378972Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789378972Cleaning up temporary data from old regions at 1731789378982 (+10 ms)Region opened successfully at 1731789378988 (+6 ms) 2024-11-16T20:36:18,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T20:36:18,988 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T20:36:18,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T20:36:18,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T20:36:18,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T20:36:18,988 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T20:36:18,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731789378988Disabling compacts and flushes for region at 1731789378988Disabling writes for close at 1731789378988Writing 
region close event to WAL at 1731789378988Closed at 1731789378988 2024-11-16T20:36:18,990 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:36:18,990 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T20:36:18,990 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T20:36:18,991 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T20:36:18,993 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T20:36:19,008 INFO [RS:0;40c018648b21:36051 {}] regionserver.HRegionServer(746): ClusterId : 9affd050-c51f-4bd4-ba87-a4ee4a54451d 2024-11-16T20:36:19,008 DEBUG [RS:0;40c018648b21:36051 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T20:36:19,017 DEBUG [RS:0;40c018648b21:36051 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T20:36:19,017 DEBUG [RS:0;40c018648b21:36051 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T20:36:19,028 DEBUG [RS:0;40c018648b21:36051 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T20:36:19,028 DEBUG [RS:0;40c018648b21:36051 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37fdcc12, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=40c018648b21/172.17.0.2:0 2024-11-16T20:36:19,040 DEBUG [RS:0;40c018648b21:36051 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;40c018648b21:36051 2024-11-16T20:36:19,041 INFO [RS:0;40c018648b21:36051 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T20:36:19,041 INFO [RS:0;40c018648b21:36051 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T20:36:19,041 DEBUG [RS:0;40c018648b21:36051 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T20:36:19,041 INFO [RS:0;40c018648b21:36051 {}] regionserver.HRegionServer(2659): reportForDuty to master=40c018648b21,42331,1731789378413 with port=36051, startcode=1731789378581 2024-11-16T20:36:19,041 DEBUG [RS:0;40c018648b21:36051 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T20:36:19,044 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33763, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T20:36:19,045 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42331 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 40c018648b21,36051,1731789378581 2024-11-16T20:36:19,045 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42331 {}] master.ServerManager(517): Registering regionserver=40c018648b21,36051,1731789378581 2024-11-16T20:36:19,046 DEBUG [RS:0;40c018648b21:36051 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1 2024-11-16T20:36:19,047 DEBUG [RS:0;40c018648b21:36051 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45765 2024-11-16T20:36:19,047 DEBUG [RS:0;40c018648b21:36051 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T20:36:19,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T20:36:19,059 DEBUG [RS:0;40c018648b21:36051 {}] zookeeper.ZKUtil(111): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/40c018648b21,36051,1731789378581 2024-11-16T20:36:19,059 WARN [RS:0;40c018648b21:36051 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T20:36:19,059 INFO [RS:0;40c018648b21:36051 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:36:19,059 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [40c018648b21,36051,1731789378581] 2024-11-16T20:36:19,060 DEBUG [RS:0;40c018648b21:36051 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581 2024-11-16T20:36:19,063 INFO [RS:0;40c018648b21:36051 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T20:36:19,065 INFO [RS:0;40c018648b21:36051 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T20:36:19,067 INFO [RS:0;40c018648b21:36051 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T20:36:19,067 INFO [RS:0;40c018648b21:36051 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-16T20:36:19,068 INFO [RS:0;40c018648b21:36051 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T20:36:19,069 INFO [RS:0;40c018648b21:36051 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T20:36:19,069 INFO [RS:0;40c018648b21:36051 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:19,069 DEBUG [RS:0;40c018648b21:36051 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:19,069 DEBUG [RS:0;40c018648b21:36051 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:19,069 DEBUG [RS:0;40c018648b21:36051 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:19,069 DEBUG [RS:0;40c018648b21:36051 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:19,069 DEBUG [RS:0;40c018648b21:36051 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:19,069 DEBUG [RS:0;40c018648b21:36051 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/40c018648b21:0, corePoolSize=2, maxPoolSize=2 2024-11-16T20:36:19,069 DEBUG [RS:0;40c018648b21:36051 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:19,070 DEBUG [RS:0;40c018648b21:36051 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:19,070 DEBUG [RS:0;40c018648b21:36051 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:19,070 DEBUG [RS:0;40c018648b21:36051 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:19,070 DEBUG [RS:0;40c018648b21:36051 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:19,070 DEBUG [RS:0;40c018648b21:36051 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:19,070 DEBUG [RS:0;40c018648b21:36051 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/40c018648b21:0, corePoolSize=3, maxPoolSize=3 2024-11-16T20:36:19,070 DEBUG [RS:0;40c018648b21:36051 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0, corePoolSize=3, maxPoolSize=3 2024-11-16T20:36:19,076 INFO [RS:0;40c018648b21:36051 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-16T20:36:19,076 INFO [RS:0;40c018648b21:36051 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:19,076 INFO [RS:0;40c018648b21:36051 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:19,077 INFO [RS:0;40c018648b21:36051 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:19,077 INFO [RS:0;40c018648b21:36051 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:19,077 INFO [RS:0;40c018648b21:36051 {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,36051,1731789378581-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T20:36:19,095 INFO [RS:0;40c018648b21:36051 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T20:36:19,095 INFO [RS:0;40c018648b21:36051 {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,36051,1731789378581-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:19,095 INFO [RS:0;40c018648b21:36051 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:19,096 INFO [RS:0;40c018648b21:36051 {}] regionserver.Replication(171): 40c018648b21,36051,1731789378581 started 2024-11-16T20:36:19,113 INFO [RS:0;40c018648b21:36051 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:19,113 INFO [RS:0;40c018648b21:36051 {}] regionserver.HRegionServer(1482): Serving as 40c018648b21,36051,1731789378581, RpcServer on 40c018648b21/172.17.0.2:36051, sessionid=0x101455ccf050001 2024-11-16T20:36:19,114 DEBUG [RS:0;40c018648b21:36051 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T20:36:19,114 DEBUG [RS:0;40c018648b21:36051 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 40c018648b21,36051,1731789378581 2024-11-16T20:36:19,114 DEBUG [RS:0;40c018648b21:36051 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '40c018648b21,36051,1731789378581' 2024-11-16T20:36:19,114 DEBUG [RS:0;40c018648b21:36051 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T20:36:19,114 DEBUG [RS:0;40c018648b21:36051 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T20:36:19,115 DEBUG [RS:0;40c018648b21:36051 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T20:36:19,115 DEBUG [RS:0;40c018648b21:36051 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T20:36:19,115 DEBUG [RS:0;40c018648b21:36051 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 40c018648b21,36051,1731789378581 2024-11-16T20:36:19,115 DEBUG [RS:0;40c018648b21:36051 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '40c018648b21,36051,1731789378581' 2024-11-16T20:36:19,115 DEBUG [RS:0;40c018648b21:36051 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T20:36:19,115 DEBUG 
[RS:0;40c018648b21:36051 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T20:36:19,116 DEBUG [RS:0;40c018648b21:36051 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T20:36:19,116 INFO [RS:0;40c018648b21:36051 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T20:36:19,116 INFO [RS:0;40c018648b21:36051 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T20:36:19,143 WARN [40c018648b21:42331 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-16T20:36:19,218 INFO [RS:0;40c018648b21:36051 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C36051%2C1731789378581, suffix=, logDir=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581, archiveDir=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/oldWALs, maxLogs=32 2024-11-16T20:36:19,219 INFO [RS:0;40c018648b21:36051 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C36051%2C1731789378581.1731789379218 2024-11-16T20:36:19,224 INFO [RS:0;40c018648b21:36051 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789379218 2024-11-16T20:36:19,225 DEBUG [RS:0;40c018648b21:36051 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39659:39659),(127.0.0.1/127.0.0.1:37537:37537)] 2024-11-16T20:36:19,393 DEBUG [40c018648b21:42331 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T20:36:19,394 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=40c018648b21,36051,1731789378581 2024-11-16T20:36:19,395 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 40c018648b21,36051,1731789378581, state=OPENING 2024-11-16T20:36:19,448 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T20:36:19,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:19,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:19,459 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T20:36:19,459 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:36:19,459 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:36:19,459 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=40c018648b21,36051,1731789378581}] 2024-11-16T20:36:19,614 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T20:36:19,616 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36067, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T20:36:19,620 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T20:36:19,620 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:36:19,622 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C36051%2C1731789378581.meta, suffix=.meta, logDir=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581, archiveDir=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/oldWALs, maxLogs=32 2024-11-16T20:36:19,623 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C36051%2C1731789378581.meta.1731789379623.meta 2024-11-16T20:36:19,629 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.meta.1731789379623.meta 2024-11-16T20:36:19,636 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37537:37537),(127.0.0.1/127.0.0.1:39659:39659)] 2024-11-16T20:36:19,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:19,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:36:19,642 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T20:36:19,642 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T20:36:19,642 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T20:36:19,642 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-16T20:36:19,642 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T20:36:19,642 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:36:19,643 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T20:36:19,643 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T20:36:19,644 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T20:36:19,645 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T20:36:19,645 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:19,646 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:19,646 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T20:36:19,647 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T20:36:19,647 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:19,647 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:19,647 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T20:36:19,648 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T20:36:19,648 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:19,649 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:19,649 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T20:36:19,650 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T20:36:19,650 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:19,650 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:19,650 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T20:36:19,651 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740 2024-11-16T20:36:19,652 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740 2024-11-16T20:36:19,653 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T20:36:19,654 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T20:36:19,654 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-16T20:36:19,656 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T20:36:19,656 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=827948, jitterRate=0.052790865302085876}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T20:36:19,657 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T20:36:19,657 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731789379643Writing region info on filesystem at 1731789379643Initializing all the Stores at 1731789379644 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789379644Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789379644Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789379644Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789379644Cleaning up temporary data from old regions at 1731789379654 (+10 ms)Running coprocessor post-open hooks at 1731789379657 (+3 ms)Region opened successfully at 1731789379657 2024-11-16T20:36:19,658 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731789379614 2024-11-16T20:36:19,661 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T20:36:19,661 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T20:36:19,662 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=40c018648b21,36051,1731789378581 2024-11-16T20:36:19,663 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 40c018648b21,36051,1731789378581, state=OPEN 2024-11-16T20:36:19,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T20:36:19,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T20:36:19,707 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=40c018648b21,36051,1731789378581 2024-11-16T20:36:19,707 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:36:19,707 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:36:19,711 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T20:36:19,711 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=40c018648b21,36051,1731789378581 in 248 msec 2024-11-16T20:36:19,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T20:36:19,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 721 msec 2024-11-16T20:36:19,716 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:36:19,716 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T20:36:19,718 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T20:36:19,719 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=40c018648b21,36051,1731789378581, seqNum=-1] 2024-11-16T20:36:19,719 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T20:36:19,720 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51449, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T20:36:19,727 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 785 msec 2024-11-16T20:36:19,727 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731789379727, completionTime=-1 2024-11-16T20:36:19,727 INFO 
[master/40c018648b21:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T20:36:19,727 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T20:36:19,729 INFO [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T20:36:19,729 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731789439729 2024-11-16T20:36:19,729 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731789499729 2024-11-16T20:36:19,729 INFO [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-16T20:36:19,730 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,42331,1731789378413-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:19,730 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,42331,1731789378413-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:19,730 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,42331,1731789378413-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:19,730 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-40c018648b21:42331, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:19,730 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:19,730 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:19,732 DEBUG [master/40c018648b21:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T20:36:19,734 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.080sec 2024-11-16T20:36:19,735 INFO [master/40c018648b21:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T20:36:19,735 INFO [master/40c018648b21:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T20:36:19,735 INFO [master/40c018648b21:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T20:36:19,735 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
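Both the master (MasterQuotaManager) and, earlier, the region server (RegionServerRpcQuotaManager) report "Quota support disabled", and the slowlog, WAL event tracker and replication sink tracker system tables are likewise skipped; all of these are opt-in features. As a hedged illustration (a flag this test does not set), quota support is normally switched on with a single boolean key:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaEnableSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Opt-in flag for HBase quota support; it defaults to false, which is why
        // the master and region server above both log "Quota support disabled".
        conf.setBoolean("hbase.quota.enabled", true);
      }
    }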
2024-11-16T20:36:19,735 INFO [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T20:36:19,735 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,42331,1731789378413-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T20:36:19,735 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,42331,1731789378413-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T20:36:19,737 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T20:36:19,737 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T20:36:19,738 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,42331,1731789378413-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:19,809 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@370b05c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:36:19,809 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 40c018648b21,42331,-1 for getting cluster id 2024-11-16T20:36:19,809 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T20:36:19,811 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9affd050-c51f-4bd4-ba87-a4ee4a54451d' 2024-11-16T20:36:19,811 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T20:36:19,811 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9affd050-c51f-4bd4-ba87-a4ee4a54451d" 2024-11-16T20:36:19,812 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20c39088, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:36:19,812 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [40c018648b21,42331,-1] 2024-11-16T20:36:19,812 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T20:36:19,812 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:36:19,814 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50996, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T20:36:19,815 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55b48cd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:36:19,816 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T20:36:19,817 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=40c018648b21,36051,1731789378581, seqNum=-1] 2024-11-16T20:36:19,817 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T20:36:19,819 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56378, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T20:36:19,822 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=40c018648b21,42331,1731789378413 2024-11-16T20:36:19,822 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:36:19,825 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T20:36:19,825 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-16T20:36:19,825 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-16T20:36:19,826 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T20:36:19,827 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 40c018648b21,42331,1731789378413 2024-11-16T20:36:19,827 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@57f2ab11 2024-11-16T20:36:19,827 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T20:36:19,830 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51008, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T20:36:19,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42331 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T20:36:19,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42331 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
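The two TableDescriptorChecker(321) warnings are expected here: the test runs with a deliberately tiny region max file size (786432 bytes) and memstore flush size (8192 bytes), presumably so that flushes, splits and log rolls are triggered quickly. A sketch of how such values are typically forced at the configuration level (illustrative only; the test may instead set them on the table descriptor, which the warning also allows for):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinySizesForTestSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values matching the warnings above; far too small for production,
        // but they make a test region flush and split almost immediately.
        conf.setLong("hbase.hregion.max.filesize", 786432L);       // MAX_FILESIZE warning
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L);  // MEMSTORE_FLUSHSIZE warning
      }
    }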
2024-11-16T20:36:19,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42331 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T20:36:19,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42331 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T20:36:19,834 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T20:36:19,834 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:19,834 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42331 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-16T20:36:19,836 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T20:36:19,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42331 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T20:36:19,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36627 is added to blk_1073741835_1011 (size=395) 2024-11-16T20:36:19,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39397 is added to blk_1073741835_1011 (size=395) 2024-11-16T20:36:19,853 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a319cf76b4566a3b4ce089a3e379c327, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1 2024-11-16T20:36:19,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39397 is added to blk_1073741836_1012 (size=78) 2024-11-16T20:36:19,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36627 is added to blk_1073741836_1012 (size=78) 2024-11-16T20:36:19,862 DEBUG 
2024-11-16T20:36:19,862 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-16T20:36:19,862 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing a319cf76b4566a3b4ce089a3e379c327, disabling compactions & flushes
2024-11-16T20:36:19,862 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327.
2024-11-16T20:36:19,863 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327.
2024-11-16T20:36:19,863 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327. after waiting 0 ms
2024-11-16T20:36:19,863 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327.
2024-11-16T20:36:19,863 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327.
2024-11-16T20:36:19,863 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for a319cf76b4566a3b4ce089a3e379c327: Waiting for close lock at 1731789379862Disabling compacts and flushes for region at 1731789379862Disabling writes for close at 1731789379863 (+1 ms)Writing region close event to WAL at 1731789379863Closed at 1731789379863
2024-11-16T20:36:19,864 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META
2024-11-16T20:36:19,865 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731789379864"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731789379864"}]},"ts":"1731789379864"}
2024-11-16T20:36:19,867 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
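
Creation itself runs as a master procedure (pid=4 above), and the "Checking to see if procedure is done pid=4" entries are the client polling the master for that procedure's completion. A hedged sketch of the same flow through the asynchronous Admin call, reusing the admin and table descriptor from the previous sketch; in recent client versions the blocking Admin.createTable(...) does roughly this internally.

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    final class CreateAndWait {
        // Each poll of the returned future is what surfaces on the master as
        // "Checking to see if procedure is done pid=...".
        static void createAndWait(Admin admin, TableDescriptor descriptor) throws Exception {
            Future<Void> pending = admin.createTableAsync(descriptor);
            pending.get(30, TimeUnit.SECONDS); // fails if the CreateTableProcedure does not finish in time
        }
    }
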
2024-11-16T20:36:19,868 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-11-16T20:36:19,868 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731789379868"}]},"ts":"1731789379868"}
2024-11-16T20:36:19,871 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta
2024-11-16T20:36:19,871 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=a319cf76b4566a3b4ce089a3e379c327, ASSIGN}]
2024-11-16T20:36:19,873 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=a319cf76b4566a3b4ce089a3e379c327, ASSIGN
2024-11-16T20:36:19,874 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=a319cf76b4566a3b4ce089a3e379c327, ASSIGN; state=OFFLINE, location=40c018648b21,36051,1731789378581; forceNewPlan=false, retain=false
2024-11-16T20:36:20,025 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a319cf76b4566a3b4ce089a3e379c327, regionState=OPENING, regionLocation=40c018648b21,36051,1731789378581
2024-11-16T20:36:20,028 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=a319cf76b4566a3b4ce089a3e379c327, ASSIGN because future has completed
2024-11-16T20:36:20,028 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a319cf76b4566a3b4ce089a3e379c327, server=40c018648b21,36051,1731789378581}]
2024-11-16T20:36:20,185 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327.
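
The assignment procedures above (pid=5 and pid=6) end with the single region of the new table opening on 40c018648b21,36051. If a client wants to see where that region landed, it can ask the region locator; the snippet below is purely illustrative, not part of the test, and assumes a Connection like the one in the create-table sketch.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    final class WhereIsMyRegion {
        static HRegionLocation locate(Connection conn) throws IOException {
            try (RegionLocator locator =
                     conn.getRegionLocator(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))) {
                // Empty start key -> the first (and here only) region of the table;
                // 'true' forces a fresh lookup instead of using the client-side cache.
                return locator.getRegionLocation(Bytes.toBytes(""), true);
            }
        }
    }
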
2024-11-16T20:36:20,186 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a319cf76b4566a3b4ce089a3e379c327, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327.', STARTKEY => '', ENDKEY => ''}
2024-11-16T20:36:20,186 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart a319cf76b4566a3b4ce089a3e379c327
2024-11-16T20:36:20,186 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-16T20:36:20,186 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a319cf76b4566a3b4ce089a3e379c327
2024-11-16T20:36:20,186 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a319cf76b4566a3b4ce089a3e379c327
2024-11-16T20:36:20,187 INFO [StoreOpener-a319cf76b4566a3b4ce089a3e379c327-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a319cf76b4566a3b4ce089a3e379c327
2024-11-16T20:36:20,189 INFO [StoreOpener-a319cf76b4566a3b4ce089a3e379c327-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a319cf76b4566a3b4ce089a3e379c327 columnFamilyName info
2024-11-16T20:36:20,189 DEBUG [StoreOpener-a319cf76b4566a3b4ce089a3e379c327-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-16T20:36:20,190 INFO [StoreOpener-a319cf76b4566a3b4ce089a3e379c327-1 {}] regionserver.HStore(327): Store=a319cf76b4566a3b4ce089a3e379c327/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-16T20:36:20,190 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a319cf76b4566a3b4ce089a3e379c327
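
The CompactionConfiguration line above is the store echoing its effective compaction settings, and each figure corresponds to a standard hbase-site property; the mapping below is a sketch against the Configuration from the first example, with the values as printed in the log (most of them HBase defaults). The split-policy numbers in the "Opened ..." entry just below follow from the same test settings: desiredMaxFileSize=799644 is simply hbase.hregion.max.filesize (786432) scaled by the logged jitterRate, 786432 x (1 + 0.0168...) ≈ 799644.

    // Illustrative only: standard HBase property names, set to the values
    // echoed in the CompactionConfiguration line above.
    conf.setLong("hbase.hstore.compaction.min.size", 134217728L);   // minCompactSize: 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                  // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                 // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);      // major period: 7 days in ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
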
2024-11-16T20:36:20,190 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/a319cf76b4566a3b4ce089a3e379c327
2024-11-16T20:36:20,191 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/a319cf76b4566a3b4ce089a3e379c327
2024-11-16T20:36:20,191 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a319cf76b4566a3b4ce089a3e379c327
2024-11-16T20:36:20,191 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a319cf76b4566a3b4ce089a3e379c327
2024-11-16T20:36:20,193 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a319cf76b4566a3b4ce089a3e379c327
2024-11-16T20:36:20,195 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/a319cf76b4566a3b4ce089a3e379c327/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-16T20:36:20,196 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a319cf76b4566a3b4ce089a3e379c327; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=799644, jitterRate=0.01680077612400055}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-16T20:36:20,196 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a319cf76b4566a3b4ce089a3e379c327
2024-11-16T20:36:20,197 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a319cf76b4566a3b4ce089a3e379c327: Running coprocessor pre-open hook at 1731789380186Writing region info on filesystem at 1731789380186Initializing all the Stores at 1731789380187 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789380187Cleaning up temporary data from old regions at 1731789380191 (+4 ms)Running coprocessor post-open hooks at 1731789380196 (+5 ms)Region opened successfully at 1731789380197 (+1 ms)
2024-11-16T20:36:20,198 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327., pid=6, masterSystemTime=1731789380181
2024-11-16T20:36:20,200 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327.
2024-11-16T20:36:20,200 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327.
2024-11-16T20:36:20,201 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a319cf76b4566a3b4ce089a3e379c327, regionState=OPEN, openSeqNum=2, regionLocation=40c018648b21,36051,1731789378581
2024-11-16T20:36:20,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a319cf76b4566a3b4ce089a3e379c327, server=40c018648b21,36051,1731789378581 because future has completed
2024-11-16T20:36:20,208 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-16T20:36:20,208 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a319cf76b4566a3b4ce089a3e379c327, server=40c018648b21,36051,1731789378581 in 177 msec
2024-11-16T20:36:20,211 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-16T20:36:20,211 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=a319cf76b4566a3b4ce089a3e379c327, ASSIGN in 337 msec
2024-11-16T20:36:20,212 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-16T20:36:20,212 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731789380212"}]},"ts":"1731789380212"}
2024-11-16T20:36:20,214 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta
2024-11-16T20:36:20,216 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION
2024-11-16T20:36:20,218 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 385 msec
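
From this point on, a background Close-WAL-Writer-0 thread logs the same pair of warnings roughly once per second. The two files it is trying to lease-recover live under hdfs://localhost:33297 and an older test-data directory, i.e. they belong to a minicluster from earlier in this run whose HDFS client has already been shut down, which is why every attempt fails with "Filesystem closed". RecoverLeaseFSUtils checks whether a file is already closed by invoking DistributedFileSystem.isFileClosed through reflection and treats any exception as "not closed yet, try again". The snippet below sketches that probe-and-retry shape; it is an illustration of the pattern, not the actual RecoverLeaseFSUtils code, the class and method names are invented, and the real utility also gives up after a configurable timeout.

    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class IsFileClosedProbe {
        // Reflective probe: isFileClosed(Path) is not part of the generic FileSystem
        // API, hence the lookup by name. Any failure (such as the "Filesystem closed"
        // IOException wrapped in the InvocationTargetException below) is treated as
        // "not closed yet".
        static boolean probe(FileSystem fs, Path wal) {
            try {
                Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
                return (Boolean) isFileClosed.invoke(fs, wal);
            } catch (Exception e) {
                return false;
            }
        }

        static void waitUntilClosed(FileSystem fs, Path wal) throws InterruptedException {
            while (!probe(fs, wal)) {
                Thread.sleep(1000L); // matches the ~1 s cadence of the repeated warnings below
            }
        }
    }
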
2024-11-16T20:36:20,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:20,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:20,646 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T20:36:20,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:20,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:20,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:20,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:20,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:20,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:20,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:20,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:20,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:20,675 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:21,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:21,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:22,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:22,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:23,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:23,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:24,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:24,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:25,063 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T20:36:25,064 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-16T20:36:25,143 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T20:36:25,143 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T20:36:25,145 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T20:36:25,145 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-16T20:36:25,146 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T20:36:25,146 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T20:36:25,146 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T20:36:25,146 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-16T20:36:25,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:25,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:25,689 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T20:36:25,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:25,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:25,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:25,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:25,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:25,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:25,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:25,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:25,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:25,727 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:26,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:26,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:27,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:27,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:28,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:28,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:29,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:29,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-16T20:36:29,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42331 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-16T20:36:29,914 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-16T20:36:29,914 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100
2024-11-16T20:36:29,919 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-16T20:36:29,919 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327.
2024-11-16T20:36:29,924 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327., hostname=40c018648b21,36051,1731789378581, seqNum=2]
2024-11-16T20:36:30,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:30,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:31,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:31,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-16T20:36:31,927 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789379218
2024-11-16T20:36:31,928 WARN [ResponseProcessor for block BP-1928493638-172.17.0.2-1731789375556:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1928493638-172.17.0.2-1731789375556:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T20:36:31,928 WARN [ResponseProcessor for block BP-1928493638-172.17.0.2-1731789375556:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1928493638-172.17.0.2-1731789375556:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T20:36:31,928 WARN [ResponseProcessor for block BP-1928493638-172.17.0.2-1731789375556:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1928493638-172.17.0.2-1731789375556:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1928493638-172.17.0.2-1731789375556:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:39397,DS-1dcaeddf-649b-40fc-a1fe-2e2365f836c6,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T20:36:31,928 WARN [DataStreamer for file /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/WALs/40c018648b21,42331,1731789378413/40c018648b21%2C42331%2C1731789378413.1731789378733 block BP-1928493638-172.17.0.2-1731789375556:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1928493638-172.17.0.2-1731789375556:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39397,DS-1dcaeddf-649b-40fc-a1fe-2e2365f836c6,DISK], DatanodeInfoWithStorage[127.0.0.1:36627,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39397,DS-1dcaeddf-649b-40fc-a1fe-2e2365f836c6,DISK]) is bad.
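Note: the warnings just above are the client side of the deliberate pipeline restart this test exercises. Each in-flight block's ResponseProcessor hits an unexpected EOF when its datanode goes away, and DataStreamer then starts error recovery with the dead node (127.0.0.1:39397) marked bad. The following Java sketch shows roughly how such a restart can be forced against a MiniDFSCluster under an open output stream; it is illustrative only and not the actual TestLogRolling code, and restartDataNode(int)/waitActive() are MiniDFSCluster test helpers whose exact signatures should be treated as assumptions.

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    class PipelineBounce {
      // Bounce both datanodes underneath a stream that stays open across the restart.
      static void bounceDataNodesUnder(MiniDFSCluster dfsCluster, FSDataOutputStream openStream)
          throws Exception {
        dfsCluster.restartDataNode(0);
        dfsCluster.restartDataNode(1);
        dfsCluster.waitActive();
        // The next write/flush on the pre-existing stream is what surfaces the
        // EOFException in ResponseProcessor and the "datanode ... is bad" recovery above.
        openStream.write(1);
        openStream.hflush();
      }
    }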
2024-11-16T20:36:31,929 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1507403653_22 at /127.0.0.1:36520 [Receiving block BP-1928493638-172.17.0.2-1731789375556:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39397:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36520 dst: /127.0.0.1:39397 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:36:31,929 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1507403653_22 at /127.0.0.1:51364 [Receiving block BP-1928493638-172.17.0.2-1731789375556:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51364 dst: /127.0.0.1:36627 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:36:31,930 WARN [DataStreamer for file /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.meta.1731789379623.meta block BP-1928493638-172.17.0.2-1731789375556:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1928493638-172.17.0.2-1731789375556:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36627,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK], DatanodeInfoWithStorage[127.0.0.1:39397,DS-1dcaeddf-649b-40fc-a1fe-2e2365f836c6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39397,DS-1dcaeddf-649b-40fc-a1fe-2e2365f836c6,DISK]) is bad. 2024-11-16T20:36:31,930 WARN [DataStreamer for file /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789379218 block BP-1928493638-172.17.0.2-1731789375556:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1928493638-172.17.0.2-1731789375556:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39397,DS-1dcaeddf-649b-40fc-a1fe-2e2365f836c6,DISK], DatanodeInfoWithStorage[127.0.0.1:36627,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39397,DS-1dcaeddf-649b-40fc-a1fe-2e2365f836c6,DISK]) is bad. 2024-11-16T20:36:31,930 WARN [PacketResponder: BP-1928493638-172.17.0.2-1731789375556:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39397] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:36:31,930 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_622408538_22 at /127.0.0.1:36498 [Receiving block BP-1928493638-172.17.0.2-1731789375556:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39397:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36498 dst: /127.0.0.1:39397 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:36:31,930 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1507403653_22 at /127.0.0.1:51372 [Receiving block BP-1928493638-172.17.0.2-1731789375556:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51372 dst: /127.0.0.1:36627 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:36:31,930 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1507403653_22 at /127.0.0.1:36522 [Receiving block BP-1928493638-172.17.0.2-1731789375556:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39397:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36522 dst: /127.0.0.1:39397 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:36:31,931 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_622408538_22 at /127.0.0.1:51316 [Receiving block BP-1928493638-172.17.0.2-1731789375556:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51316 dst: /127.0.0.1:36627 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
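The DataXceiver errors above are the datanode-side mirror of the aborted writes: the writer has gone away mid-block, so the receiving threads see closed channels and premature EOFs. On a two-datanode mini cluster there is also no spare node to substitute into a broken pipeline, which is why recovery can only drop the bad node and continue with the survivor. Test setups commonly relax the HDFS client's replace-datanode-on-failure policy for exactly this situation; the property keys below are real HDFS client configuration names, but whether this particular test sets them is not visible in the log.

    import org.apache.hadoop.conf.Configuration;

    class PipelineRecoveryConf {
      // Illustrative client-side settings for tiny test clusters.
      static Configuration relaxedPipelinePolicy() {
        Configuration conf = new Configuration();
        // Never try to substitute a replacement datanode into a failed pipeline
        // (there is no spare node on a 2-datanode mini cluster).
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        // Alternative: attempt replacement, but keep writing if none can be found.
        // conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }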
2024-11-16T20:36:31,946 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77c83f30{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-16T20:36:31,946 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@54dbaae8{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-16T20:36:31,946 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-16T20:36:31,946 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23accf28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-16T20:36:31,946 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47946b20{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.log.dir/,STOPPED}
2024-11-16T20:36:31,948 WARN [BP-1928493638-172.17.0.2-1731789375556 heartbeating to localhost/127.0.0.1:45765 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-16T20:36:31,948 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-16T20:36:31,948 WARN [BP-1928493638-172.17.0.2-1731789375556 heartbeating to localhost/127.0.0.1:45765 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1928493638-172.17.0.2-1731789375556 (Datanode Uuid 946683c1-2007-470d-9f92-4184b2d8bc02) service to localhost/127.0.0.1:45765
2024-11-16T20:36:31,948 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-16T20:36:31,948 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/data/data3/current/BP-1928493638-172.17.0.2-1731789375556 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T20:36:31,948 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/data/data4/current/BP-1928493638-172.17.0.2-1731789375556 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T20:36:31,949 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-16T20:36:31,957 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:36:31,962 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:36:31,963 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:36:31,963 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:36:31,963 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T20:36:31,964 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75f58649{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:36:31,964 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@571de0fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:36:32,066 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@45604664{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/java.io.tmpdir/jetty-localhost-38923-hadoop-hdfs-3_4_1-tests_jar-_-any-13261220015472684562/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:36:32,067 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6e793ffb{HTTP/1.1, (http/1.1)}{localhost:38923} 2024-11-16T20:36:32,067 INFO [Time-limited test {}] server.Server(415): Started @173899ms 2024-11-16T20:36:32,069 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:36:32,086 WARN [ResponseProcessor for block BP-1928493638-172.17.0.2-1731789375556:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1928493638-172.17.0.2-1731789375556:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:32,086 WARN [ResponseProcessor for block BP-1928493638-172.17.0.2-1731789375556:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1928493638-172.17.0.2-1731789375556:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:32,086 WARN [ResponseProcessor for block BP-1928493638-172.17.0.2-1731789375556:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1928493638-172.17.0.2-1731789375556:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:32,087 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1507403653_22 at /127.0.0.1:39590 [Receiving block BP-1928493638-172.17.0.2-1731789375556:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39590 dst: /127.0.0.1:36627 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
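The recurring "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings, both before this point and again further below, come from the Close-WAL-Writer thread still trying to recover the lease on WAL files of an earlier mini cluster whose DFSClient has already been shut down: RecoverLeaseFSUtils probes isFileClosed reflectively (hence the InvocationTargetException wrapper), so every one-second retry fails the same way. The sketch below shows the general recover-then-poll pattern involved; it is not the RecoverLeaseFSUtils source, and the names and retry interval are illustrative.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    class LeaseRecoverySketch {
      // Rough shape of WAL lease recovery against HDFS.
      static void recoverLeaseAndWait(DistributedFileSystem dfs, Path wal) throws Exception {
        boolean recovered = dfs.recoverLease(wal);     // ask the NameNode to begin lease recovery
        while (!recovered && !dfs.isFileClosed(wal)) { // isFileClosed is the probe that keeps failing above
          Thread.sleep(1000L);                         // matches the ~1s retry cadence in the log
          recovered = dfs.recoverLease(wal);
        }
      }
    }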
2024-11-16T20:36:32,087 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_622408538_22 at /127.0.0.1:39562 [Receiving block BP-1928493638-172.17.0.2-1731789375556:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39562 dst: /127.0.0.1:36627 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:36:32,087 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1507403653_22 at /127.0.0.1:39576 [Receiving block BP-1928493638-172.17.0.2-1731789375556:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39576 dst: /127.0.0.1:36627 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:36:32,096 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@44e11af1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:36:32,096 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@60043bb8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:36:32,096 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:36:32,096 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@301612e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:36:32,096 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13646a74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.log.dir/,STOPPED} 2024-11-16T20:36:32,097 WARN [BP-1928493638-172.17.0.2-1731789375556 heartbeating to localhost/127.0.0.1:45765 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:36:32,097 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
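With the second datanode now also being torn down (and restarted just below), a test of this shape typically rolls the region server's WAL afterwards so that subsequent edits go to a writer backed by a healthy pipeline, then re-reads the earlier rows. A minimal, hedged sketch using the public HBase Admin API follows; the connection and server-name handling are assumptions, not taken from this log.

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    class RollAfterRestart {
      // Ask a region server to roll its WAL after the pipeline restart.
      static void rollWal(Connection conn, ServerName regionServer) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          admin.rollWALWriter(regionServer); // new WAL file => new writer => fresh HDFS pipeline
        }
      }
    }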
2024-11-16T20:36:32,097 WARN [BP-1928493638-172.17.0.2-1731789375556 heartbeating to localhost/127.0.0.1:45765 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1928493638-172.17.0.2-1731789375556 (Datanode Uuid 0cca2dae-91fe-4aff-bf61-df0664cf3d5f) service to localhost/127.0.0.1:45765 2024-11-16T20:36:32,097 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:36:32,098 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/data/data1/current/BP-1928493638-172.17.0.2-1731789375556 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:36:32,098 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/data/data2/current/BP-1928493638-172.17.0.2-1731789375556 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:36:32,098 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:36:32,107 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:36:32,110 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:36:32,111 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:36:32,111 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:36:32,111 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T20:36:32,111 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ede944f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:36:32,112 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17312068{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:36:32,220 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7ff0f915{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/java.io.tmpdir/jetty-localhost-43057-hadoop-hdfs-3_4_1-tests_jar-_-any-1223534454361073936/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:36:32,221 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@58a9274b{HTTP/1.1, 
(http/1.1)}{localhost:43057} 2024-11-16T20:36:32,221 INFO [Time-limited test {}] server.Server(415): Started @174053ms 2024-11-16T20:36:32,222 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:36:32,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:32,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:32,744 WARN [Thread-1335 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T20:36:32,746 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c803ae2c73b1baa with lease ID 0x821d7c8d982e6717: from storage DS-1dcaeddf-649b-40fc-a1fe-2e2365f836c6 node DatanodeRegistration(127.0.0.1:33605, datanodeUuid=946683c1-2007-470d-9f92-4184b2d8bc02, infoPort=33723, infoSecurePort=0, ipcPort=32915, storageInfo=lv=-57;cid=testClusterID;nsid=777324421;c=1731789375556), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:36:32,747 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c803ae2c73b1baa with lease ID 0x821d7c8d982e6717: from storage DS-1f3c77fe-5c4b-4065-8150-9bbcc51eef86 node DatanodeRegistration(127.0.0.1:33605, datanodeUuid=946683c1-2007-470d-9f92-4184b2d8bc02, infoPort=33723, infoSecurePort=0, ipcPort=32915, storageInfo=lv=-57;cid=testClusterID;nsid=777324421;c=1731789375556), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:36:32,841 WARN [Thread-1355 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T20:36:32,843 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe62410db0eebd42f with lease ID 0x821d7c8d982e6718: from storage DS-88cf6506-870c-44f5-8caf-ba15ba12dde8 node DatanodeRegistration(127.0.0.1:40095, datanodeUuid=0cca2dae-91fe-4aff-bf61-df0664cf3d5f, infoPort=37991, infoSecurePort=0, ipcPort=37225, storageInfo=lv=-57;cid=testClusterID;nsid=777324421;c=1731789375556), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:36:32,844 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe62410db0eebd42f with lease ID 0x821d7c8d982e6718: from storage DS-ccf448de-e303-4fc2-8ab2-81785fd6f6ce node DatanodeRegistration(127.0.0.1:40095, datanodeUuid=0cca2dae-91fe-4aff-bf61-df0664cf3d5f, infoPort=37991, infoSecurePort=0, ipcPort=37225, storageInfo=lv=-57;cid=testClusterID;nsid=777324421;c=1731789375556), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:36:33,244 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-16T20:36:33,248 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-16T20:36:33,250 ERROR [FSHLog-0-hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1-prefix:40c018648b21,36051,1731789378581 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36627,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:33,250 WARN [FSHLog-0-hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1-prefix:40c018648b21,36051,1731789378581 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36627,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:36:33,251 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 40c018648b21%2C36051%2C1731789378581:(num 1731789379218) roll requested 2024-11-16T20:36:33,251 INFO [regionserver/40c018648b21:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C36051%2C1731789378581.1731789393251 2024-11-16T20:36:33,258 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789379218 newFile=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789393251 2024-11-16T20:36:33,258 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:33,258 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:33,258 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:33,258 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:33,258 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:33,259 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789379218 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789393251 2024-11-16T20:36:33,259 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36627,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:33,259 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36627,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:36:33,259 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789379218 2024-11-16T20:36:33,260 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33723:33723),(127.0.0.1/127.0.0.1:37991:37991)] 2024-11-16T20:36:33,260 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789379218 is not closed yet, will try archiving it next time 2024-11-16T20:36:33,260 WARN [IPC Server handler 2 on default port 45765 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789379218 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-11-16T20:36:33,260 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789379218 after 1ms 2024-11-16T20:36:33,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:33,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:34,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:34,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:34,747 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-16T20:36:35,264 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-16T20:36:35,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:35,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:36,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:36,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:37,261 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789379218 after 4002ms 2024-11-16T20:36:37,268 WARN [ResponseProcessor for block BP-1928493638-172.17.0.2-1731789375556:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1928493638-172.17.0.2-1731789375556:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1928493638-172.17.0.2-1731789375556:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:40095,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:37,268 WARN [DataStreamer for file /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789393251 block BP-1928493638-172.17.0.2-1731789375556:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1928493638-172.17.0.2-1731789375556:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33605,DS-1dcaeddf-649b-40fc-a1fe-2e2365f836c6,DISK], DatanodeInfoWithStorage[127.0.0.1:40095,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40095,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK]) is bad. 2024-11-16T20:36:37,268 WARN [PacketResponder: BP-1928493638-172.17.0.2-1731789375556:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40095] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:36:37,269 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1507403653_22 at /127.0.0.1:54880 [Receiving block BP-1928493638-172.17.0.2-1731789375556:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33605:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54880 dst: /127.0.0.1:33605 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:36:37,269 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1507403653_22 at /127.0.0.1:58448 [Receiving block BP-1928493638-172.17.0.2-1731789375556:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40095:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58448 dst: /127.0.0.1:40095 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:36:37,322 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ff0f915{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:36:37,323 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@58a9274b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:36:37,323 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:36:37,323 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17312068{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:36:37,323 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ede944f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.log.dir/,STOPPED} 2024-11-16T20:36:37,325 WARN [BP-1928493638-172.17.0.2-1731789375556 heartbeating to localhost/127.0.0.1:45765 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:36:37,325 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T20:36:37,325 WARN [BP-1928493638-172.17.0.2-1731789375556 heartbeating to localhost/127.0.0.1:45765 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1928493638-172.17.0.2-1731789375556 (Datanode Uuid 0cca2dae-91fe-4aff-bf61-df0664cf3d5f) service to localhost/127.0.0.1:45765 2024-11-16T20:36:37,325 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:36:37,325 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/data/data1/current/BP-1928493638-172.17.0.2-1731789375556 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:36:37,326 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/data/data2/current/BP-1928493638-172.17.0.2-1731789375556 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:36:37,326 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:36:37,334 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:36:37,341 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:36:37,342 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:36:37,342 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:36:37,342 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:36:37,343 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ef50a45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:36:37,343 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6573e60c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:36:37,446 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a8b2822{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/java.io.tmpdir/jetty-localhost-40731-hadoop-hdfs-3_4_1-tests_jar-_-any-6421129493391288534/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:36:37,446 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@68df0564{HTTP/1.1, 
(http/1.1)}{localhost:40731} 2024-11-16T20:36:37,447 INFO [Time-limited test {}] server.Server(415): Started @179279ms 2024-11-16T20:36:37,448 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:36:37,468 WARN [ResponseProcessor for block BP-1928493638-172.17.0.2-1731789375556:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1928493638-172.17.0.2-1731789375556:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:37,469 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1507403653_22 at /127.0.0.1:54910 [Receiving block BP-1928493638-172.17.0.2-1731789375556:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33605:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54910 dst: /127.0.0.1:33605 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T20:36:37,470 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45604664{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:36:37,471 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6e793ffb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:36:37,471 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:36:37,471 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@571de0fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:36:37,471 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75f58649{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.log.dir/,STOPPED} 2024-11-16T20:36:37,472 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T20:36:37,472 WARN [BP-1928493638-172.17.0.2-1731789375556 heartbeating to localhost/127.0.0.1:45765 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:36:37,472 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:36:37,472 WARN [BP-1928493638-172.17.0.2-1731789375556 heartbeating to localhost/127.0.0.1:45765 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1928493638-172.17.0.2-1731789375556 (Datanode Uuid 946683c1-2007-470d-9f92-4184b2d8bc02) service to localhost/127.0.0.1:45765 2024-11-16T20:36:37,473 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/data/data3/current/BP-1928493638-172.17.0.2-1731789375556 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:36:37,473 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/data/data4/current/BP-1928493638-172.17.0.2-1731789375556 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:36:37,473 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:36:37,487 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:36:37,490 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:36:37,497 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:36:37,497 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:36:37,497 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T20:36:37,500 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f3e5a16{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:36:37,501 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3420abff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:36:37,605 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c524311{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/java.io.tmpdir/jetty-localhost-37583-hadoop-hdfs-3_4_1-tests_jar-_-any-2145967664685563740/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:36:37,606 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@523d3401{HTTP/1.1, (http/1.1)}{localhost:37583} 2024-11-16T20:36:37,606 INFO [Time-limited test {}] server.Server(415): Started @179438ms 2024-11-16T20:36:37,608 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:36:37,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:37,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:37,957 WARN [Thread-1409 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T20:36:37,959 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4cff60fa3c3ec4e2 with lease ID 0x821d7c8d982e6719: from storage DS-88cf6506-870c-44f5-8caf-ba15ba12dde8 node DatanodeRegistration(127.0.0.1:33431, datanodeUuid=0cca2dae-91fe-4aff-bf61-df0664cf3d5f, infoPort=37753, infoSecurePort=0, ipcPort=43829, storageInfo=lv=-57;cid=testClusterID;nsid=777324421;c=1731789375556), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:36:37,959 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4cff60fa3c3ec4e2 with lease ID 0x821d7c8d982e6719: from storage DS-ccf448de-e303-4fc2-8ab2-81785fd6f6ce node DatanodeRegistration(127.0.0.1:33431, datanodeUuid=0cca2dae-91fe-4aff-bf61-df0664cf3d5f, infoPort=37753, infoSecurePort=0, ipcPort=43829, storageInfo=lv=-57;cid=testClusterID;nsid=777324421;c=1731789375556), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:36:38,122 WARN [Thread-1429 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T20:36:38,125 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf386c535ee63307e with lease ID 0x821d7c8d982e671a: from storage DS-1dcaeddf-649b-40fc-a1fe-2e2365f836c6 node DatanodeRegistration(127.0.0.1:33935, datanodeUuid=946683c1-2007-470d-9f92-4184b2d8bc02, infoPort=46159, infoSecurePort=0, ipcPort=38067, storageInfo=lv=-57;cid=testClusterID;nsid=777324421;c=1731789375556), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:36:38,125 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf386c535ee63307e with lease ID 0x821d7c8d982e671a: from storage DS-1f3c77fe-5c4b-4065-8150-9bbcc51eef86 node DatanodeRegistration(127.0.0.1:33935, datanodeUuid=946683c1-2007-470d-9f92-4184b2d8bc02, infoPort=46159, infoSecurePort=0, ipcPort=38067, storageInfo=lv=-57;cid=testClusterID;nsid=777324421;c=1731789375556), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:36:38,626 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-16T20:36:38,628 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-16T20:36:38,629 ERROR [FSHLog-0-hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1-prefix:40c018648b21,36051,1731789378581 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33605,DS-1dcaeddf-649b-40fc-a1fe-2e2365f836c6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:36:38,629 WARN [FSHLog-0-hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1-prefix:40c018648b21,36051,1731789378581 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33605,DS-1dcaeddf-649b-40fc-a1fe-2e2365f836c6,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T20:36:38,629 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 40c018648b21%2C36051%2C1731789378581:(num 1731789393251) roll requested
2024-11-16T20:36:38,630 INFO [regionserver/40c018648b21:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C36051%2C1731789378581.1731789398629
2024-11-16T20:36:38,637 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789393251 newFile=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789398629
2024-11-16T20:36:38,637 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T20:36:38,637 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T20:36:38,638 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T20:36:38,638 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T20:36:38,638 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T20:36:38,638 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789393251 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789398629
2024-11-16T20:36:38,638 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33605,DS-1dcaeddf-649b-40fc-a1fe-2e2365f836c6,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T20:36:38,638 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33605,DS-1dcaeddf-649b-40fc-a1fe-2e2365f836c6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:38,638 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789393251 2024-11-16T20:36:38,639 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37753:37753),(127.0.0.1/127.0.0.1:46159:46159)] 2024-11-16T20:36:38,639 WARN [IPC Server handler 1 on default port 45765 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789393251 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-16T20:36:38,639 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789393251 is not closed yet, will try archiving it next time 2024-11-16T20:36:38,639 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789393251 after 1ms 2024-11-16T20:36:38,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:38,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:36:39,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:39,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:40,641 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C36051%2C1731789378581.1731789400641 2024-11-16T20:36:40,648 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789398629 newFile=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789400641 2024-11-16T20:36:40,649 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:40,649 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:40,649 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:40,649 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:40,649 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:40,650 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789398629 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789400641 2024-11-16T20:36:40,651 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37753:37753),(127.0.0.1/127.0.0.1:46159:46159)] 2024-11-16T20:36:40,651 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789393251 is not closed yet, will try archiving it next time 2024-11-16T20:36:40,651 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789398629 is not closed yet, will try archiving it next time 2024-11-16T20:36:40,651 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for 
hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789379218 2024-11-16T20:36:40,651 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789379218 2024-11-16T20:36:40,652 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789379218 after 1ms 2024-11-16T20:36:40,652 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789379218 2024-11-16T20:36:40,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33935 is added to blk_1073741838_1019 (size=1264) 2024-11-16T20:36:40,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741838_1019 (size=1264) 2024-11-16T20:36:40,653 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789393251 is not closed yet, will try archiving it next time 2024-11-16T20:36:40,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:40,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:36:40,663 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731789380197/Put/vlen=218/seqid=0] 2024-11-16T20:36:40,664 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731789389925/Put/vlen=1045/seqid=0] 2024-11-16T20:36:40,664 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789379218 2024-11-16T20:36:40,664 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789393251 2024-11-16T20:36:40,664 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789393251 2024-11-16T20:36:40,664 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789393251 after 0ms 2024-11-16T20:36:40,664 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789393251 2024-11-16T20:36:40,668 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731789393250/Put/vlen=1045/seqid=0] 2024-11-16T20:36:40,668 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731789395265/Put/vlen=1045/seqid=0] 2024-11-16T20:36:40,668 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789393251 2024-11-16T20:36:40,668 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789398629 2024-11-16T20:36:40,668 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789398629 2024-11-16T20:36:40,669 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789398629 after 1ms 2024-11-16T20:36:40,669 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789398629 2024-11-16T20:36:40,672 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731789398629/Put/vlen=1045/seqid=0] 2024-11-16T20:36:40,672 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for 
hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789400641 2024-11-16T20:36:40,672 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789400641 2024-11-16T20:36:40,673 WARN [IPC Server handler 4 on default port 45765 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789400641 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-16T20:36:40,673 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789400641 after 0ms 2024-11-16T20:36:40,960 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-16T20:36:41,131 WARN [ResponseProcessor for block BP-1928493638-172.17.0.2-1731789375556:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1928493638-172.17.0.2-1731789375556:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:36:41,131 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_622408538_22 at /127.0.0.1:38276 [Receiving block BP-1928493638-172.17.0.2-1731789375556:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:33431:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38276 dst: /127.0.0.1:33431
java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:33431 remote=/127.0.0.1:38276]. Total timeout mills is 60000, 59517 millis timeout left.
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T20:36:41,132 WARN [DataStreamer for file /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789400641 block BP-1928493638-172.17.0.2-1731789375556:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1928493638-172.17.0.2-1731789375556:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33431,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK], DatanodeInfoWithStorage[127.0.0.1:33935,DS-1dcaeddf-649b-40fc-a1fe-2e2365f836c6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33431,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK]) is bad.
2024-11-16T20:36:41,131 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_622408538_22 at /127.0.0.1:46716 [Receiving block BP-1928493638-172.17.0.2-1731789375556:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:33935:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46716 dst: /127.0.0.1:33935 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:36:41,133 WARN [DataStreamer for file /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789400641 block BP-1928493638-172.17.0.2-1731789375556:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1928493638-172.17.0.2-1731789375556:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:41,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741839_1022 (size=85) 2024-11-16T20:36:41,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:41,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:42,641 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789393251 after 4002ms 2024-11-16T20:36:42,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:42,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:43,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:43,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:44,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:44,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:36:44,674 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789400641 after 4001ms 2024-11-16T20:36:44,674 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789400641 2024-11-16T20:36:44,678 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789400641 2024-11-16T20:36:44,679 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-16T20:36:44,679 ERROR [FSHLog-0-hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1-prefix:40c018648b21,36051,1731789378581.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36627,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:44,679 WARN [FSHLog-0-hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1-prefix:40c018648b21,36051,1731789378581.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36627,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:36:44,679 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 40c018648b21%2C36051%2C1731789378581.meta:.meta(num 1731789379623) roll requested 2024-11-16T20:36:44,680 INFO [regionserver/40c018648b21:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C36051%2C1731789378581.meta.1731789404680.meta 2024-11-16T20:36:44,688 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:44,688 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:44,688 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:44,688 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:44,688 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:44,688 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.meta.1731789379623.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.meta.1731789404680.meta 2024-11-16T20:36:44,689 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36627,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:44,689 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36627,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:36:44,689 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.meta.1731789379623.meta 2024-11-16T20:36:44,689 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37753:37753),(127.0.0.1/127.0.0.1:46159:46159)] 2024-11-16T20:36:44,689 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.meta.1731789379623.meta is not closed yet, will try archiving it next time 2024-11-16T20:36:44,689 WARN [IPC Server handler 1 on default port 45765 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.meta.1731789379623.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-11-16T20:36:44,689 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.meta.1731789379623.meta after 0ms 2024-11-16T20:36:44,706 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740/.tmp/info/eacdbcc7651c4418810541b3d289ada5 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327./info:regioninfo/1731789380201/Put/seqid=0 2024-11-16T20:36:44,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741841_1025 (size=7125) 2024-11-16T20:36:44,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33935 is added to blk_1073741841_1025 (size=7125) 2024-11-16T20:36:45,112 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740/.tmp/info/eacdbcc7651c4418810541b3d289ada5 2024-11-16T20:36:45,140 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740/.tmp/ns/88d428a52a3a41c69d69705791a3e58d is 43, key is default/ns:d/1731789379721/Put/seqid=0 2024-11-16T20:36:45,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741842_1026 (size=5153) 2024-11-16T20:36:45,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33935 is added to blk_1073741842_1026 (size=5153) 2024-11-16T20:36:45,545 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740/.tmp/ns/88d428a52a3a41c69d69705791a3e58d 2024-11-16T20:36:45,567 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740/.tmp/table/66f568f56a4e4de08d07b90520f0d9d1 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731789380212/Put/seqid=0 2024-11-16T20:36:45,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741843_1027 (size=5438) 2024-11-16T20:36:45,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33935 is added to blk_1073741843_1027 (size=5438) 2024-11-16T20:36:45,572 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740/.tmp/table/66f568f56a4e4de08d07b90520f0d9d1 2024-11-16T20:36:45,578 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740/.tmp/info/eacdbcc7651c4418810541b3d289ada5 as hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740/info/eacdbcc7651c4418810541b3d289ada5 2024-11-16T20:36:45,585 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740/info/eacdbcc7651c4418810541b3d289ada5, entries=10, sequenceid=11, filesize=7.0 K 2024-11-16T20:36:45,586 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740/.tmp/ns/88d428a52a3a41c69d69705791a3e58d as hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740/ns/88d428a52a3a41c69d69705791a3e58d 2024-11-16T20:36:45,593 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740/ns/88d428a52a3a41c69d69705791a3e58d, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T20:36:45,594 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740/.tmp/table/66f568f56a4e4de08d07b90520f0d9d1 as hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740/table/66f568f56a4e4de08d07b90520f0d9d1 2024-11-16T20:36:45,601 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740/table/66f568f56a4e4de08d07b90520f0d9d1, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T20:36:45,602 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 923ms, sequenceid=11, compaction requested=false 2024-11-16T20:36:45,602 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T20:36:45,602 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing a319cf76b4566a3b4ce089a3e379c327 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-16T20:36:45,603 ERROR [FSHLog-0-hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1-prefix:40c018648b21,36051,1731789378581 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1928493638-172.17.0.2-1731789375556:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:45,603 WARN [FSHLog-0-hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1-prefix:40c018648b21,36051,1731789378581 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1928493638-172.17.0.2-1731789375556:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T20:36:45,604 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 40c018648b21%2C36051%2C1731789378581:(num 1731789400641) roll requested 2024-11-16T20:36:45,604 INFO [regionserver/40c018648b21:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C36051%2C1731789378581.1731789405604 2024-11-16T20:36:45,610 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789400641 newFile=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789405604 2024-11-16T20:36:45,610 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:45,610 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:45,610 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:45,610 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:45,610 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:45,610 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789400641 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789405604 2024-11-16T20:36:45,610 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1928493638-172.17.0.2-1731789375556:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:45,611 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1928493638-172.17.0.2-1731789375556:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:45,611 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789400641 2024-11-16T20:36:45,612 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789400641 after 1ms 2024-11-16T20:36:45,612 DEBUG [regionserver/40c018648b21:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37753:37753),(127.0.0.1/127.0.0.1:46159:46159)] 2024-11-16T20:36:45,612 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.1731789400641 to hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/oldWALs/40c018648b21%2C36051%2C1731789378581.1731789400641 2024-11-16T20:36:45,630 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/a319cf76b4566a3b4ce089a3e379c327/.tmp/info/7e560df1a8dd4bd8938e6e61c9625c5d is 1080, key is row1002/info:/1731789389925/Put/seqid=0 2024-11-16T20:36:45,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741845_1029 (size=9270) 2024-11-16T20:36:45,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33935 is added to blk_1073741845_1029 (size=9270) 2024-11-16T20:36:45,642 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/a319cf76b4566a3b4ce089a3e379c327/.tmp/info/7e560df1a8dd4bd8938e6e61c9625c5d 2024-11-16T20:36:45,649 DEBUG 
[Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/a319cf76b4566a3b4ce089a3e379c327/.tmp/info/7e560df1a8dd4bd8938e6e61c9625c5d as hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/a319cf76b4566a3b4ce089a3e379c327/info/7e560df1a8dd4bd8938e6e61c9625c5d 2024-11-16T20:36:45,655 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/a319cf76b4566a3b4ce089a3e379c327/info/7e560df1a8dd4bd8938e6e61c9625c5d, entries=4, sequenceid=8, filesize=9.1 K 2024-11-16T20:36:45,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:45,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:45,657 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for a319cf76b4566a3b4ce089a3e379c327 in 54ms, sequenceid=8, compaction requested=false 2024-11-16T20:36:45,657 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for a319cf76b4566a3b4ce089a3e379c327: 2024-11-16T20:36:45,662 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T20:36:45,663 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T20:36:45,663 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:36:45,663 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:36:45,663 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:36:45,663 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-16T20:36:45,663 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T20:36:45,663 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=253472220, stopped=false 2024-11-16T20:36:45,663 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=40c018648b21,42331,1731789378413 2024-11-16T20:36:45,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T20:36:45,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T20:36:45,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:45,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:45,741 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T20:36:45,741 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T20:36:45,741 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:36:45,742 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:36:45,742 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:36:45,742 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '40c018648b21,36051,1731789378581' ***** 2024-11-16T20:36:45,742 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T20:36:45,742 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:36:45,742 INFO [RS:0;40c018648b21:36051 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T20:36:45,742 INFO [RS:0;40c018648b21:36051 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T20:36:45,742 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T20:36:45,742 INFO [RS:0;40c018648b21:36051 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T20:36:45,742 INFO [RS:0;40c018648b21:36051 {}] regionserver.HRegionServer(3091): Received CLOSE for a319cf76b4566a3b4ce089a3e379c327 2024-11-16T20:36:45,743 INFO [RS:0;40c018648b21:36051 {}] regionserver.HRegionServer(959): stopping server 40c018648b21,36051,1731789378581 2024-11-16T20:36:45,743 INFO [RS:0;40c018648b21:36051 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T20:36:45,743 INFO [RS:0;40c018648b21:36051 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;40c018648b21:36051. 
2024-11-16T20:36:45,743 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a319cf76b4566a3b4ce089a3e379c327, disabling compactions & flushes 2024-11-16T20:36:45,743 DEBUG [RS:0;40c018648b21:36051 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:36:45,743 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327. 2024-11-16T20:36:45,743 DEBUG [RS:0;40c018648b21:36051 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:36:45,743 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327. 2024-11-16T20:36:45,743 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327. after waiting 0 ms 2024-11-16T20:36:45,743 INFO [RS:0;40c018648b21:36051 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T20:36:45,743 INFO [RS:0;40c018648b21:36051 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T20:36:45,743 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327. 2024-11-16T20:36:45,743 INFO [RS:0;40c018648b21:36051 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-16T20:36:45,744 INFO [RS:0;40c018648b21:36051 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T20:36:45,744 INFO [RS:0;40c018648b21:36051 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T20:36:45,744 DEBUG [RS:0;40c018648b21:36051 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, a319cf76b4566a3b4ce089a3e379c327=TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327.} 2024-11-16T20:36:45,744 DEBUG [RS:0;40c018648b21:36051 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a319cf76b4566a3b4ce089a3e379c327 2024-11-16T20:36:45,744 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T20:36:45,744 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T20:36:45,744 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T20:36:45,744 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T20:36:45,744 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T20:36:45,751 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/a319cf76b4566a3b4ce089a3e379c327/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-16T20:36:45,751 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T20:36:45,751 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327. 2024-11-16T20:36:45,751 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a319cf76b4566a3b4ce089a3e379c327: Waiting for close lock at 1731789405743Running coprocessor pre-close hooks at 1731789405743Disabling compacts and flushes for region at 1731789405743Disabling writes for close at 1731789405743Writing region close event to WAL at 1731789405746 (+3 ms)Running coprocessor post-close hooks at 1731789405751 (+5 ms)Closed at 1731789405751 2024-11-16T20:36:45,752 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731789379831.a319cf76b4566a3b4ce089a3e379c327. 
2024-11-16T20:36:45,752 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T20:36:45,752 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T20:36:45,752 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731789405744Running coprocessor pre-close hooks at 1731789405744Disabling compacts and flushes for region at 1731789405744Disabling writes for close at 1731789405744Writing region close event to WAL at 1731789405747 (+3 ms)Running coprocessor post-close hooks at 1731789405752 (+5 ms)Closed at 1731789405752 2024-11-16T20:36:45,752 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T20:36:45,944 INFO [RS:0;40c018648b21:36051 {}] regionserver.HRegionServer(976): stopping server 40c018648b21,36051,1731789378581; all regions closed. 2024-11-16T20:36:45,945 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:45,945 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:45,945 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:45,945 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:45,945 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:45,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33935 is added to blk_1073741840_1023 (size=825) 2024-11-16T20:36:45,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741840_1023 (size=825) 2024-11-16T20:36:46,142 INFO [regionserver/40c018648b21:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T20:36:46,142 INFO [regionserver/40c018648b21:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T20:36:46,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:46,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:36:47,078 INFO [regionserver/40c018648b21:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T20:36:47,127 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-16T20:36:47,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:47,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:48,392 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T20:36:48,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:48,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:48,690 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.meta.1731789379623.meta after 4001ms 2024-11-16T20:36:48,691 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/WALs/40c018648b21,36051,1731789378581/40c018648b21%2C36051%2C1731789378581.meta.1731789379623.meta to hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/oldWALs/40c018648b21%2C36051%2C1731789378581.meta.1731789379623.meta 2024-11-16T20:36:48,694 DEBUG [RS:0;40c018648b21:36051 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/oldWALs 2024-11-16T20:36:48,694 INFO [RS:0;40c018648b21:36051 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 40c018648b21%2C36051%2C1731789378581.meta:.meta(num 1731789404680) 2024-11-16T20:36:48,694 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:48,695 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:48,695 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:48,695 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:48,695 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:48,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33935 is added to blk_1073741844_1028 (size=1162) 2024-11-16T20:36:48,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741844_1028 (size=1162) 2024-11-16T20:36:48,702 DEBUG [RS:0;40c018648b21:36051 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/oldWALs 2024-11-16T20:36:48,702 INFO [RS:0;40c018648b21:36051 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 40c018648b21%2C36051%2C1731789378581:(num 1731789405604) 2024-11-16T20:36:48,702 DEBUG [RS:0;40c018648b21:36051 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:36:48,702 INFO [RS:0;40c018648b21:36051 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T20:36:48,702 INFO [RS:0;40c018648b21:36051 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T20:36:48,703 INFO [RS:0;40c018648b21:36051 {}] hbase.ChoreService(370): Chore service for: regionserver/40c018648b21:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T20:36:48,703 INFO [RS:0;40c018648b21:36051 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T20:36:48,703 INFO 
[regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T20:36:48,703 INFO [RS:0;40c018648b21:36051 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36051 2024-11-16T20:36:48,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T20:36:48,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/40c018648b21,36051,1731789378581 2024-11-16T20:36:48,751 INFO [RS:0;40c018648b21:36051 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T20:36:48,842 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [40c018648b21,36051,1731789378581] 2024-11-16T20:36:48,934 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/40c018648b21,36051,1731789378581 already deleted, retry=false 2024-11-16T20:36:48,935 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 40c018648b21,36051,1731789378581 expired; onlineServers=0 2024-11-16T20:36:48,935 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '40c018648b21,42331,1731789378413' ***** 2024-11-16T20:36:48,935 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T20:36:48,935 INFO [M:0;40c018648b21:42331 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T20:36:48,935 INFO [M:0;40c018648b21:42331 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T20:36:48,936 DEBUG [M:0;40c018648b21:42331 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T20:36:48,936 DEBUG [M:0;40c018648b21:42331 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T20:36:48,936 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T20:36:48,936 DEBUG [master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789378947 {}] cleaner.HFileCleaner(306): Exit Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789378947,5,FailOnTimeoutGroup] 2024-11-16T20:36:48,936 DEBUG [master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789378947 {}] cleaner.HFileCleaner(306): Exit Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789378947,5,FailOnTimeoutGroup] 2024-11-16T20:36:48,936 INFO [M:0;40c018648b21:42331 {}] hbase.ChoreService(370): Chore service for: master/40c018648b21:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T20:36:48,936 INFO [M:0;40c018648b21:42331 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T20:36:48,937 DEBUG [M:0;40c018648b21:42331 {}] master.HMaster(1795): Stopping service threads 2024-11-16T20:36:48,937 INFO [M:0;40c018648b21:42331 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T20:36:48,937 INFO [M:0;40c018648b21:42331 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T20:36:48,937 INFO [M:0;40c018648b21:42331 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T20:36:48,937 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T20:36:48,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:36:48,943 INFO [RS:0;40c018648b21:36051 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T20:36:48,943 INFO [RS:0;40c018648b21:36051 {}] regionserver.HRegionServer(1031): Exiting; stopping=40c018648b21,36051,1731789378581; zookeeper connection closed. 
2024-11-16T20:36:48,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36051-0x101455ccf050001, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:36:48,943 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1ed73c4f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1ed73c4f 2024-11-16T20:36:48,943 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T20:36:49,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T20:36:49,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:49,088 DEBUG [M:0;40c018648b21:42331 {}] zookeeper.ZKUtil(347): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T20:36:49,088 WARN [M:0;40c018648b21:42331 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T20:36:49,089 INFO [M:0;40c018648b21:42331 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/.lastflushedseqids 2024-11-16T20:36:49,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741846_1030 (size=111) 2024-11-16T20:36:49,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33935 is added to blk_1073741846_1030 (size=111) 2024-11-16T20:36:49,101 INFO [M:0;40c018648b21:42331 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T20:36:49,101 INFO [M:0;40c018648b21:42331 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T20:36:49,102 DEBUG [M:0;40c018648b21:42331 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T20:36:49,102 INFO [M:0;40c018648b21:42331 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:36:49,102 DEBUG [M:0;40c018648b21:42331 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:36:49,102 DEBUG [M:0;40c018648b21:42331 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T20:36:49,102 DEBUG [M:0;40c018648b21:42331 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T20:36:49,102 INFO [M:0;40c018648b21:42331 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-16T20:36:49,102 ERROR [FSHLog-0-hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData-prefix:40c018648b21,42331,1731789378413 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36627,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:49,103 WARN [FSHLog-0-hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData-prefix:40c018648b21,42331,1731789378413 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36627,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:49,103 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 40c018648b21%2C42331%2C1731789378413:(num 1731789378733) roll requested 2024-11-16T20:36:49,103 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C42331%2C1731789378413.1731789409103 2024-11-16T20:36:49,109 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:49,109 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:49,109 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:49,109 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:49,109 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:49,109 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/WALs/40c018648b21,42331,1731789378413/40c018648b21%2C42331%2C1731789378413.1731789378733 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/WALs/40c018648b21,42331,1731789378413/40c018648b21%2C42331%2C1731789378413.1731789409103 2024-11-16T20:36:49,109 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36627,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:49,110 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36627,DS-88cf6506-870c-44f5-8caf-ba15ba12dde8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T20:36:49,110 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/WALs/40c018648b21,42331,1731789378413/40c018648b21%2C42331%2C1731789378413.1731789378733 2024-11-16T20:36:49,110 WARN [IPC Server handler 2 on default port 45765 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/WALs/40c018648b21,42331,1731789378413/40c018648b21%2C42331%2C1731789378413.1731789378733 has not been closed. Lease recovery is in progress. 
RecoveryId = 1032 for block blk_1073741830_1013 2024-11-16T20:36:49,110 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/WALs/40c018648b21,42331,1731789378413/40c018648b21%2C42331%2C1731789378413.1731789378733 after 0ms 2024-11-16T20:36:49,116 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46159:46159),(127.0.0.1/127.0.0.1:37753:37753)] 2024-11-16T20:36:49,116 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/WALs/40c018648b21,42331,1731789378413/40c018648b21%2C42331%2C1731789378413.1731789378733 is not closed yet, will try archiving it next time 2024-11-16T20:36:49,139 DEBUG [M:0;40c018648b21:42331 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/04e76bde0d8d47acb1650a32550cb6f7 is 82, key is hbase:meta,,1/info:regioninfo/1731789379662/Put/seqid=0 2024-11-16T20:36:49,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33935 is added to blk_1073741848_1033 (size=5672) 2024-11-16T20:36:49,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741848_1033 (size=5672) 2024-11-16T20:36:49,146 INFO [M:0;40c018648b21:42331 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/04e76bde0d8d47acb1650a32550cb6f7 2024-11-16T20:36:49,167 DEBUG [M:0;40c018648b21:42331 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/568ae22fbba34780b25cd572d9e600ce is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731789380217/Put/seqid=0 2024-11-16T20:36:49,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33935 is added to blk_1073741849_1034 (size=6119) 2024-11-16T20:36:49,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741849_1034 (size=6119) 2024-11-16T20:36:49,172 INFO [M:0;40c018648b21:42331 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/568ae22fbba34780b25cd572d9e600ce 2024-11-16T20:36:49,192 DEBUG [M:0;40c018648b21:42331 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c0cb6591a35b403ca64a5b4842886223 is 69, key is 40c018648b21,36051,1731789378581/rs:state/1731789379045/Put/seqid=0 2024-11-16T20:36:49,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33431 is added to blk_1073741850_1035 (size=5156) 2024-11-16T20:36:49,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33935 is added to blk_1073741850_1035 (size=5156) 2024-11-16T20:36:49,198 INFO [M:0;40c018648b21:42331 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c0cb6591a35b403ca64a5b4842886223 2024-11-16T20:36:49,219 DEBUG [M:0;40c018648b21:42331 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c60ed8e27c5347b0ae3be64604502b92 is 52, key is load_balancer_on/state:d/1731789379824/Put/seqid=0 2024-11-16T20:36:49,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33935 is added to blk_1073741851_1036 (size=5056) 2024-11-16T20:36:49,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741851_1036 (size=5056) 2024-11-16T20:36:49,225 INFO [M:0;40c018648b21:42331 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c60ed8e27c5347b0ae3be64604502b92 2024-11-16T20:36:49,231 DEBUG [M:0;40c018648b21:42331 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/04e76bde0d8d47acb1650a32550cb6f7 as hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/04e76bde0d8d47acb1650a32550cb6f7 2024-11-16T20:36:49,237 INFO [M:0;40c018648b21:42331 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/04e76bde0d8d47acb1650a32550cb6f7, entries=8, sequenceid=56, filesize=5.5 K 2024-11-16T20:36:49,239 DEBUG [M:0;40c018648b21:42331 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/568ae22fbba34780b25cd572d9e600ce as hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/568ae22fbba34780b25cd572d9e600ce 2024-11-16T20:36:49,245 INFO [M:0;40c018648b21:42331 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/568ae22fbba34780b25cd572d9e600ce, entries=6, sequenceid=56, filesize=6.0 K 2024-11-16T20:36:49,246 DEBUG [M:0;40c018648b21:42331 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c0cb6591a35b403ca64a5b4842886223 as hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c0cb6591a35b403ca64a5b4842886223 2024-11-16T20:36:49,252 INFO [M:0;40c018648b21:42331 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c0cb6591a35b403ca64a5b4842886223, entries=1, sequenceid=56, filesize=5.0 K 2024-11-16T20:36:49,253 DEBUG [M:0;40c018648b21:42331 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c60ed8e27c5347b0ae3be64604502b92 as hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c60ed8e27c5347b0ae3be64604502b92 2024-11-16T20:36:49,259 INFO [M:0;40c018648b21:42331 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c60ed8e27c5347b0ae3be64604502b92, entries=1, sequenceid=56, filesize=4.9 K 2024-11-16T20:36:49,260 INFO [M:0;40c018648b21:42331 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 158ms, sequenceid=56, compaction requested=false 2024-11-16T20:36:49,261 INFO [M:0;40c018648b21:42331 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:36:49,262 DEBUG [M:0;40c018648b21:42331 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731789409102Disabling compacts and flushes for region at 1731789409102Disabling writes for close at 1731789409102Obtaining lock to block concurrent updates at 1731789409102Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731789409102Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731789409103 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731789409117 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731789409117Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731789409139 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731789409139Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731789409151 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731789409167 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731789409167Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731789409178 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731789409192 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731789409192Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731789409203 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731789409219 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731789409219Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a70f6b3: reopening flushed file at 1731789409231 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3350099b: reopening flushed file at 1731789409238 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@656d1a46: reopening flushed file at 1731789409245 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b650502: reopening flushed file at 1731789409252 (+7 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 158ms, sequenceid=56, compaction requested=false at 1731789409260 (+8 ms)Writing region close event to WAL at 1731789409261 (+1 ms)Closed at 1731789409261 2024-11-16T20:36:49,262 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:49,262 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:49,262 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:49,262 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:49,262 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:36:49,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741847_1031 (size=757) 2024-11-16T20:36:49,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33935 is added to blk_1073741847_1031 (size=757) 2024-11-16T20:36:49,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:49,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:50,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:50,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:50,752 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:50,752 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:50,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:50,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:50,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:50,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:50,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:50,775 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:50,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:50,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:50,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:50,781 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:50,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:50,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:51,287 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T20:36:51,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:51,290 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:51,290 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:51,290 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:51,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:51,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:51,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:51,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:51,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:51,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:51,317 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:51,317 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:51,317 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:51,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:36:51,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:51,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:52,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:52,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-16T20:36:53,111 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/WALs/40c018648b21,42331,1731789378413/40c018648b21%2C42331%2C1731789378413.1731789378733 after 4001ms
2024-11-16T20:36:53,112 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/WALs/40c018648b21,42331,1731789378413/40c018648b21%2C42331%2C1731789378413.1731789378733 to hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/oldWALs/40c018648b21%2C42331%2C1731789378413.1731789378733
2024-11-16T20:36:53,116 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/MasterData/oldWALs/40c018648b21%2C42331%2C1731789378413.1731789378733 to hdfs://localhost:45765/user/jenkins/test-data/ea1c3b89-aac7-189b-b831-0252fe0d29c1/oldWALs/40c018648b21%2C42331%2C1731789378413.1731789378733$masterlocalwal$
2024-11-16T20:36:53,117 INFO [M:0;40c018648b21:42331 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-16T20:36:53,117 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-16T20:36:53,117 INFO [M:0;40c018648b21:42331 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42331
2024-11-16T20:36:53,117 INFO [M:0;40c018648b21:42331 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-16T20:36:53,127 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-11-16T20:36:53,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-16T20:36:53,236 INFO [M:0;40c018648b21:42331 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-16T20:36:53,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42331-0x101455ccf050000, quorum=127.0.0.1:63539, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-16T20:36:53,240 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c524311{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-16T20:36:53,240 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@523d3401{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-16T20:36:53,240 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-16T20:36:53,241 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3420abff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-16T20:36:53,241 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f3e5a16{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.log.dir/,STOPPED}
2024-11-16T20:36:53,243 WARN [BP-1928493638-172.17.0.2-1731789375556 heartbeating to localhost/127.0.0.1:45765 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-16T20:36:53,243 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-16T20:36:53,243 WARN [BP-1928493638-172.17.0.2-1731789375556 heartbeating to localhost/127.0.0.1:45765 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1928493638-172.17.0.2-1731789375556 (Datanode Uuid 946683c1-2007-470d-9f92-4184b2d8bc02) service to localhost/127.0.0.1:45765 2024-11-16T20:36:53,243 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:36:53,243 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/data/data3/current/BP-1928493638-172.17.0.2-1731789375556 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:36:53,244 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/data/data4/current/BP-1928493638-172.17.0.2-1731789375556 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:36:53,244 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:36:53,247 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a8b2822{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:36:53,247 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68df0564{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:36:53,247 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:36:53,247 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6573e60c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:36:53,247 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ef50a45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.log.dir/,STOPPED} 2024-11-16T20:36:53,249 WARN [BP-1928493638-172.17.0.2-1731789375556 heartbeating to localhost/127.0.0.1:45765 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:36:53,249 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T20:36:53,249 WARN [BP-1928493638-172.17.0.2-1731789375556 heartbeating to localhost/127.0.0.1:45765 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1928493638-172.17.0.2-1731789375556 (Datanode Uuid 0cca2dae-91fe-4aff-bf61-df0664cf3d5f) service to localhost/127.0.0.1:45765 2024-11-16T20:36:53,249 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:36:53,249 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/data/data1/current/BP-1928493638-172.17.0.2-1731789375556 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:36:53,250 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/cluster_b84409d9-577f-5fbd-0002-d56360c32b89/data/data2/current/BP-1928493638-172.17.0.2-1731789375556 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:36:53,250 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:36:53,256 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@232fa1ae{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T20:36:53,257 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5c07fc0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:36:53,257 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:36:53,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10c583a0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:36:53,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7096145a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.log.dir/,STOPPED} 2024-11-16T20:36:53,263 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T20:36:53,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T20:36:53,290 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=183 (was 158) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45765 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:45765 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45765 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45765 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:45765 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:45765 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45765 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:45765 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=222 (was 225), ProcessCount=11 (was 11), AvailableMemoryMB=3920 (was 4168) 2024-11-16T20:36:53,298 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=183, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=222, ProcessCount=11, AvailableMemoryMB=3920 2024-11-16T20:36:53,298 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T20:36:53,299 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.log.dir so I do NOT create it in target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e 2024-11-16T20:36:53,299 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fc0ccb3-8d8a-0fae-2969-7ef6a7e107bb/hadoop.tmp.dir so I do NOT create it in target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e 2024-11-16T20:36:53,299 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/cluster_2135b74f-5a03-20cb-8b39-1582a9f18290, deleteOnExit=true 2024-11-16T20:36:53,299 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T20:36:53,299 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/test.cache.data in system properties and HBase conf 2024-11-16T20:36:53,299 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T20:36:53,299 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/hadoop.log.dir in system properties and HBase conf 2024-11-16T20:36:53,299 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T20:36:53,299 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T20:36:53,299 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T20:36:53,299 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file 
system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-16T20:36:53,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T20:36:53,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T20:36:53,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T20:36:53,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T20:36:53,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T20:36:53,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T20:36:53,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T20:36:53,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T20:36:53,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T20:36:53,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/nfs.dump.dir in system properties and HBase conf 2024-11-16T20:36:53,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/java.io.tmpdir in system properties and HBase conf 2024-11-16T20:36:53,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T20:36:53,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T20:36:53,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T20:36:53,315 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T20:36:53,645 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:36:53,649 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:36:53,652 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:36:53,652 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:36:53,652 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:36:53,653 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:36:53,653 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ea88c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:36:53,654 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b24fbcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:36:53,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:53,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:53,752 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39212263{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/java.io.tmpdir/jetty-localhost-32941-hadoop-hdfs-3_4_1-tests_jar-_-any-779087937961320664/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T20:36:53,753 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20f59884{HTTP/1.1, (http/1.1)}{localhost:32941} 2024-11-16T20:36:53,753 INFO [Time-limited test {}] server.Server(415): Started @195585ms 2024-11-16T20:36:53,765 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T20:36:54,187 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:36:54,196 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:36:54,198 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:36:54,198 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:36:54,198 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:36:54,201 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3dc731d8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:36:54,201 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@678c2527{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:36:54,317 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@61e16297{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/java.io.tmpdir/jetty-localhost-42583-hadoop-hdfs-3_4_1-tests_jar-_-any-6012678029076099477/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:36:54,318 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4faae08f{HTTP/1.1, (http/1.1)}{localhost:42583} 2024-11-16T20:36:54,318 INFO [Time-limited test {}] server.Server(415): Started @196150ms 2024-11-16T20:36:54,319 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:36:54,382 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:36:54,387 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:36:54,388 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:36:54,388 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:36:54,389 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T20:36:54,392 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@317cfbff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:36:54,392 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@757e4aa2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:36:54,500 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@38b8be0d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/java.io.tmpdir/jetty-localhost-38459-hadoop-hdfs-3_4_1-tests_jar-_-any-15037473608617596445/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:36:54,501 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34534d9{HTTP/1.1, (http/1.1)}{localhost:38459} 2024-11-16T20:36:54,501 INFO [Time-limited test {}] server.Server(415): Started @196333ms 2024-11-16T20:36:54,502 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
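The block above traces HBaseTestingUtil bringing the test's HDFS layer up: the namenode web server and both datanode Jetty servers are started for the StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2} request logged at 20:36:53,298. A rough sketch of the test-side call that drives this startup is shown below; the class and method names are taken from the log messages and vary between HBase versions, so treat it as an outline under those assumptions rather than the test's verbatim code.

// Sketch only, inside a JUnit test method declared to throw Exception.
// Names (HBaseTestingUtil, StartMiniClusterOption) are assumed from the log messages above.
HBaseTestingUtil util = new HBaseTestingUtil();
StartMiniClusterOption option = StartMiniClusterOption.builder()
    .numMasters(1)        // values shown in the StartMiniClusterOption{...} log entry
    .numRegionServers(1)
    .numDataNodes(2)
    .build();
util.startMiniCluster(option);   // brings up ZooKeeper, HDFS (2 datanodes) and HBase
try {
  // ... exercise the cluster through util.getConnection() ...
} finally {
  util.shutdownMiniCluster();    // temp dirs were created with deleteOnExit=true
}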
2024-11-16T20:36:54,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:54,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:55,143 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T20:36:55,143 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T20:36:55,144 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T20:36:55,144 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T20:36:55,617 WARN [Thread-1650 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/cluster_2135b74f-5a03-20cb-8b39-1582a9f18290/data/data2/current/BP-1017383234-172.17.0.2-1731789413326/current, will proceed with Du for space computation calculation, 2024-11-16T20:36:55,617 WARN [Thread-1649 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/cluster_2135b74f-5a03-20cb-8b39-1582a9f18290/data/data1/current/BP-1017383234-172.17.0.2-1731789413326/current, will proceed with Du for space computation calculation, 2024-11-16T20:36:55,635 WARN [Thread-1613 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T20:36:55,637 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6262059035b3c6c1 with lease ID 0x69377744b4a9f513: Processing first storage report for DS-3456abd2-7e70-4a9b-8dda-475682f58168 from datanode DatanodeRegistration(127.0.0.1:35211, datanodeUuid=3ac1ff5c-7770-40b7-930c-2d06b71de9ab, infoPort=45543, infoSecurePort=0, ipcPort=38925, storageInfo=lv=-57;cid=testClusterID;nsid=1050025904;c=1731789413326) 2024-11-16T20:36:55,637 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6262059035b3c6c1 with lease ID 0x69377744b4a9f513: from storage DS-3456abd2-7e70-4a9b-8dda-475682f58168 node DatanodeRegistration(127.0.0.1:35211, datanodeUuid=3ac1ff5c-7770-40b7-930c-2d06b71de9ab, infoPort=45543, infoSecurePort=0, ipcPort=38925, storageInfo=lv=-57;cid=testClusterID;nsid=1050025904;c=1731789413326), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:36:55,637 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6262059035b3c6c1 with lease ID 0x69377744b4a9f513: Processing first storage report for DS-9c505099-f2fd-49dc-9d37-0e97359c9915 from datanode DatanodeRegistration(127.0.0.1:35211, datanodeUuid=3ac1ff5c-7770-40b7-930c-2d06b71de9ab, infoPort=45543, infoSecurePort=0, ipcPort=38925, storageInfo=lv=-57;cid=testClusterID;nsid=1050025904;c=1731789413326) 2024-11-16T20:36:55,637 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6262059035b3c6c1 with lease ID 0x69377744b4a9f513: from storage DS-9c505099-f2fd-49dc-9d37-0e97359c9915 node DatanodeRegistration(127.0.0.1:35211, datanodeUuid=3ac1ff5c-7770-40b7-930c-2d06b71de9ab, infoPort=45543, infoSecurePort=0, ipcPort=38925, storageInfo=lv=-57;cid=testClusterID;nsid=1050025904;c=1731789413326), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:36:55,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:55,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:36:55,750 WARN [Thread-1660 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/cluster_2135b74f-5a03-20cb-8b39-1582a9f18290/data/data3/current/BP-1017383234-172.17.0.2-1731789413326/current, will proceed with Du for space computation calculation, 2024-11-16T20:36:55,751 WARN [Thread-1661 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/cluster_2135b74f-5a03-20cb-8b39-1582a9f18290/data/data4/current/BP-1017383234-172.17.0.2-1731789413326/current, will proceed with Du for space computation calculation, 2024-11-16T20:36:55,773 WARN [Thread-1636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T20:36:55,775 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x802bae712757d505 with lease ID 0x69377744b4a9f514: Processing first storage report for DS-deb3e6f1-014c-4e8a-a140-fc22d0c5e845 from datanode DatanodeRegistration(127.0.0.1:42185, datanodeUuid=739f6543-dfe3-4271-995a-006aaa885773, infoPort=37541, infoSecurePort=0, ipcPort=35659, storageInfo=lv=-57;cid=testClusterID;nsid=1050025904;c=1731789413326) 2024-11-16T20:36:55,775 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x802bae712757d505 with lease ID 0x69377744b4a9f514: from storage DS-deb3e6f1-014c-4e8a-a140-fc22d0c5e845 node DatanodeRegistration(127.0.0.1:42185, datanodeUuid=739f6543-dfe3-4271-995a-006aaa885773, infoPort=37541, infoSecurePort=0, ipcPort=35659, storageInfo=lv=-57;cid=testClusterID;nsid=1050025904;c=1731789413326), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:36:55,775 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x802bae712757d505 with lease ID 0x69377744b4a9f514: Processing first storage report for DS-101e09eb-8d15-4f3e-801f-2d6e884cfcf2 from datanode DatanodeRegistration(127.0.0.1:42185, datanodeUuid=739f6543-dfe3-4271-995a-006aaa885773, infoPort=37541, infoSecurePort=0, ipcPort=35659, storageInfo=lv=-57;cid=testClusterID;nsid=1050025904;c=1731789413326) 2024-11-16T20:36:55,775 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x802bae712757d505 with lease ID 0x69377744b4a9f514: from storage DS-101e09eb-8d15-4f3e-801f-2d6e884cfcf2 node DatanodeRegistration(127.0.0.1:42185, datanodeUuid=739f6543-dfe3-4271-995a-006aaa885773, infoPort=37541, infoSecurePort=0, ipcPort=35659, storageInfo=lv=-57;cid=testClusterID;nsid=1050025904;c=1731789413326), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:36:55,856 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e 2024-11-16T20:36:55,859 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/cluster_2135b74f-5a03-20cb-8b39-1582a9f18290/zookeeper_0, clientPort=53558, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/cluster_2135b74f-5a03-20cb-8b39-1582a9f18290/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/cluster_2135b74f-5a03-20cb-8b39-1582a9f18290/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T20:36:55,865 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53558 2024-11-16T20:36:55,865 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:36:55,866 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:36:55,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741825_1001 (size=7) 2024-11-16T20:36:55,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741825_1001 (size=7) 2024-11-16T20:36:55,881 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36 with version=8 2024-11-16T20:36:55,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/hbase-staging 2024-11-16T20:36:55,884 INFO [Time-limited test {}] client.ConnectionUtils(128): master/40c018648b21:0 server-side Connection retries=45 2024-11-16T20:36:55,884 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:36:55,884 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T20:36:55,884 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T20:36:55,884 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:36:55,884 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T20:36:55,884 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T20:36:55,884 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T20:36:55,896 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45571 2024-11-16T20:36:55,898 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45571 connecting to ZooKeeper ensemble=127.0.0.1:53558 2024-11-16T20:36:55,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:455710x0, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T20:36:55,957 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45571-0x101455d615e0000 connected 2024-11-16T20:36:56,041 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:36:56,043 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:36:56,046 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:36:56,046 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36, hbase.cluster.distributed=false 2024-11-16T20:36:56,048 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T20:36:56,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45571 2024-11-16T20:36:56,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45571 2024-11-16T20:36:56,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45571 2024-11-16T20:36:56,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45571 2024-11-16T20:36:56,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45571 2024-11-16T20:36:56,066 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/40c018648b21:0 server-side Connection retries=45 2024-11-16T20:36:56,066 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:36:56,066 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T20:36:56,066 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T20:36:56,066 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:36:56,066 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T20:36:56,066 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T20:36:56,067 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T20:36:56,067 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40467 2024-11-16T20:36:56,069 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40467 connecting to ZooKeeper ensemble=127.0.0.1:53558 2024-11-16T20:36:56,069 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:36:56,071 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:36:56,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:404670x0, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T20:36:56,083 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:404670x0, quorum=127.0.0.1:53558, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:36:56,083 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40467-0x101455d615e0001 connected 2024-11-16T20:36:56,083 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T20:36:56,084 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T20:36:56,084 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T20:36:56,085 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T20:36:56,086 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40467 2024-11-16T20:36:56,086 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40467 2024-11-16T20:36:56,086 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40467 2024-11-16T20:36:56,087 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40467 2024-11-16T20:36:56,087 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40467 2024-11-16T20:36:56,099 
DEBUG [M:0;40c018648b21:45571 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;40c018648b21:45571 2024-11-16T20:36:56,100 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/40c018648b21,45571,1731789415883 2024-11-16T20:36:56,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:36:56,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:36:56,109 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/40c018648b21,45571,1731789415883 2024-11-16T20:36:56,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T20:36:56,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:56,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:56,120 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T20:36:56,120 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/40c018648b21,45571,1731789415883 from backup master directory 2024-11-16T20:36:56,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:36:56,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/40c018648b21,45571,1731789415883 2024-11-16T20:36:56,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:36:56,129 WARN [master/40c018648b21:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
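The ActiveMasterManager entries above follow the standard ZooKeeper election pattern: the process registers an ephemeral znode under /hbase/backup-masters, watches /hbase/master, and removes its backup entry once it has claimed the active-master znode. The sketch below illustrates that pattern with the plain ZooKeeper client; it is not HBase's ActiveMasterManager implementation, and only the quorum address and znode paths are taken from the log.

// Illustrative only: the ephemeral-znode election pattern reflected in the log entries above.
import org.apache.zookeeper.*;

public class MasterElectionSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:53558", 30000, event -> { });
    String backup = "/hbase/backup-masters/40c018648b21,45571,1731789415883";
    zk.create(backup, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    try {
      // Becoming active: claim /hbase/master, then drop the backup-master entry.
      zk.create("/hbase/master", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      zk.delete(backup, -1);
    } catch (KeeperException.NodeExistsException e) {
      // Someone else is active: stay registered as a backup and watch the active znode.
      zk.exists("/hbase/master", true);
    }
  }
}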
2024-11-16T20:36:56,129 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=40c018648b21,45571,1731789415883 2024-11-16T20:36:56,134 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/hbase.id] with ID: f7d3b93b-d876-4fc0-8014-2dea252fd696 2024-11-16T20:36:56,134 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/.tmp/hbase.id 2024-11-16T20:36:56,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741826_1002 (size=42) 2024-11-16T20:36:56,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741826_1002 (size=42) 2024-11-16T20:36:56,142 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/.tmp/hbase.id]:[hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/hbase.id] 2024-11-16T20:36:56,154 INFO [master/40c018648b21:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:36:56,154 INFO [master/40c018648b21:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T20:36:56,155 INFO [master/40c018648b21:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-16T20:36:56,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:56,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:56,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741827_1003 (size=196) 2024-11-16T20:36:56,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741827_1003 (size=196) 2024-11-16T20:36:56,175 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T20:36:56,176 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T20:36:56,177 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:36:56,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741828_1004 (size=1189) 2024-11-16T20:36:56,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741828_1004 (size=1189) 2024-11-16T20:36:56,189 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store 2024-11-16T20:36:56,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741829_1005 (size=34) 2024-11-16T20:36:56,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741829_1005 (size=34) 2024-11-16T20:36:56,196 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:36:56,196 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T20:36:56,196 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:36:56,196 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:36:56,196 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T20:36:56,196 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:36:56,196 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T20:36:56,196 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731789416196Disabling compacts and flushes for region at 1731789416196Disabling writes for close at 1731789416196Writing region close event to WAL at 1731789416196Closed at 1731789416196 2024-11-16T20:36:56,197 WARN [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/.initializing 2024-11-16T20:36:56,197 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/WALs/40c018648b21,45571,1731789415883 2024-11-16T20:36:56,200 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C45571%2C1731789415883, suffix=, logDir=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/WALs/40c018648b21,45571,1731789415883, archiveDir=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/oldWALs, maxLogs=10 2024-11-16T20:36:56,200 INFO [master/40c018648b21:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C45571%2C1731789415883.1731789416200 2024-11-16T20:36:56,205 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/WALs/40c018648b21,45571,1731789415883/40c018648b21%2C45571%2C1731789415883.1731789416200 2024-11-16T20:36:56,205 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37541:37541),(127.0.0.1/127.0.0.1:45543:45543)] 2024-11-16T20:36:56,206 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T20:36:56,206 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:36:56,206 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:56,206 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:56,207 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:56,208 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T20:36:56,209 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:56,209 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:56,209 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:56,210 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T20:36:56,210 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:56,211 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:36:56,211 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:56,212 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T20:36:56,212 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:56,212 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:36:56,212 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:56,213 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T20:36:56,213 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:56,214 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:36:56,214 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:56,214 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:56,215 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:56,216 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:56,216 DEBUG [master/40c018648b21:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:56,217 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T20:36:56,218 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:36:56,220 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:36:56,220 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=759980, jitterRate=-0.03363606333732605}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T20:36:56,221 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731789416206Initializing all the Stores at 1731789416207 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789416207Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789416207Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789416207Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789416207Cleaning up temporary data from old regions at 1731789416216 (+9 ms)Region opened successfully at 1731789416221 (+5 ms) 2024-11-16T20:36:56,221 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T20:36:56,224 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10dc0a50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=40c018648b21/172.17.0.2:0 2024-11-16T20:36:56,225 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T20:36:56,225 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T20:36:56,225 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T20:36:56,225 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T20:36:56,226 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T20:36:56,226 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T20:36:56,226 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T20:36:56,230 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T20:36:56,231 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T20:36:56,241 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T20:36:56,241 INFO [master/40c018648b21:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T20:36:56,242 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T20:36:56,251 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T20:36:56,252 INFO [master/40c018648b21:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T20:36:56,253 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T20:36:56,262 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T20:36:56,263 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T20:36:56,272 DEBUG 
[master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T20:36:56,275 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T20:36:56,287 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T20:36:56,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T20:36:56,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T20:36:56,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:56,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:56,298 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=40c018648b21,45571,1731789415883, sessionid=0x101455d615e0000, setting cluster-up flag (Was=false) 2024-11-16T20:36:56,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:56,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:56,350 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T20:36:56,352 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=40c018648b21,45571,1731789415883 2024-11-16T20:36:56,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:56,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:56,403 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T20:36:56,404 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=40c018648b21,45571,1731789415883 2024-11-16T20:36:56,405 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T20:36:56,407 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T20:36:56,408 INFO [master/40c018648b21:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T20:36:56,408 INFO [master/40c018648b21:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T20:36:56,408 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 40c018648b21,45571,1731789415883 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T20:36:56,410 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:36:56,410 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:36:56,410 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:36:56,410 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:36:56,410 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/40c018648b21:0, corePoolSize=10, maxPoolSize=10 2024-11-16T20:36:56,410 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:56,410 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/40c018648b21:0, corePoolSize=2, maxPoolSize=2 2024-11-16T20:36:56,410 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/40c018648b21:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T20:36:56,421 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:36:56,422 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T20:36:56,423 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:56,423 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T20:36:56,429 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731789446428 2024-11-16T20:36:56,429 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T20:36:56,429 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T20:36:56,429 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T20:36:56,429 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T20:36:56,429 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T20:36:56,429 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T20:36:56,432 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:56,437 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T20:36:56,437 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T20:36:56,437 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T20:36:56,437 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T20:36:56,437 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T20:36:56,437 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789416437,5,FailOnTimeoutGroup] 2024-11-16T20:36:56,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741831_1007 (size=1321) 2024-11-16T20:36:56,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741831_1007 (size=1321) 2024-11-16T20:36:56,440 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789416437,5,FailOnTimeoutGroup] 2024-11-16T20:36:56,440 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:56,440 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T20:36:56,440 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:56,440 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-16T20:36:56,440 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T20:36:56,441 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36 2024-11-16T20:36:56,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741832_1008 (size=32) 2024-11-16T20:36:56,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741832_1008 (size=32) 2024-11-16T20:36:56,451 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:36:56,452 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T20:36:56,453 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction 
window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T20:36:56,453 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:56,454 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:56,454 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T20:36:56,455 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T20:36:56,455 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:56,456 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:56,456 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T20:36:56,457 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T20:36:56,457 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:56,458 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:56,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T20:36:56,459 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T20:36:56,459 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:56,460 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:56,460 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T20:36:56,461 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740 2024-11-16T20:36:56,461 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740 2024-11-16T20:36:56,463 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T20:36:56,463 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T20:36:56,464 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-16T20:36:56,465 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T20:36:56,467 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:36:56,468 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=780325, jitterRate=-0.007766619324684143}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T20:36:56,469 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731789416451Initializing all the Stores at 1731789416452 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789416452Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789416452Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789416452Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789416452Cleaning up temporary data from old regions at 1731789416463 (+11 ms)Region opened successfully at 1731789416469 (+6 ms) 2024-11-16T20:36:56,469 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T20:36:56,469 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T20:36:56,469 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T20:36:56,469 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T20:36:56,469 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T20:36:56,470 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T20:36:56,470 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731789416469Disabling compacts and flushes for region at 1731789416469Disabling writes for close at 1731789416469Writing region 
close event to WAL at 1731789416470 (+1 ms)Closed at 1731789416470 2024-11-16T20:36:56,471 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:36:56,472 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T20:36:56,472 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T20:36:56,473 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T20:36:56,475 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T20:36:56,489 INFO [RS:0;40c018648b21:40467 {}] regionserver.HRegionServer(746): ClusterId : f7d3b93b-d876-4fc0-8014-2dea252fd696 2024-11-16T20:36:56,489 DEBUG [RS:0;40c018648b21:40467 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T20:36:56,498 DEBUG [RS:0;40c018648b21:40467 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T20:36:56,499 DEBUG [RS:0;40c018648b21:40467 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T20:36:56,509 DEBUG [RS:0;40c018648b21:40467 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T20:36:56,509 DEBUG [RS:0;40c018648b21:40467 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31a24dde, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=40c018648b21/172.17.0.2:0 2024-11-16T20:36:56,521 DEBUG [RS:0;40c018648b21:40467 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;40c018648b21:40467 2024-11-16T20:36:56,521 INFO [RS:0;40c018648b21:40467 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T20:36:56,521 INFO [RS:0;40c018648b21:40467 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T20:36:56,522 DEBUG [RS:0;40c018648b21:40467 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T20:36:56,522 INFO [RS:0;40c018648b21:40467 {}] regionserver.HRegionServer(2659): reportForDuty to master=40c018648b21,45571,1731789415883 with port=40467, startcode=1731789416066 2024-11-16T20:36:56,523 DEBUG [RS:0;40c018648b21:40467 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T20:36:56,525 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48283, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T20:36:56,525 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45571 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 40c018648b21,40467,1731789416066 2024-11-16T20:36:56,525 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45571 {}] master.ServerManager(517): Registering regionserver=40c018648b21,40467,1731789416066 2024-11-16T20:36:56,527 DEBUG [RS:0;40c018648b21:40467 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36 2024-11-16T20:36:56,527 DEBUG [RS:0;40c018648b21:40467 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43375 2024-11-16T20:36:56,527 DEBUG [RS:0;40c018648b21:40467 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T20:36:56,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T20:36:56,540 DEBUG [RS:0;40c018648b21:40467 {}] zookeeper.ZKUtil(111): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/40c018648b21,40467,1731789416066 2024-11-16T20:36:56,541 WARN [RS:0;40c018648b21:40467 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T20:36:56,541 INFO [RS:0;40c018648b21:40467 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:36:56,541 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [40c018648b21,40467,1731789416066] 2024-11-16T20:36:56,541 DEBUG [RS:0;40c018648b21:40467 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/WALs/40c018648b21,40467,1731789416066 2024-11-16T20:36:56,544 INFO [RS:0;40c018648b21:40467 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T20:36:56,546 INFO [RS:0;40c018648b21:40467 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T20:36:56,546 INFO [RS:0;40c018648b21:40467 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T20:36:56,546 INFO [RS:0;40c018648b21:40467 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-16T20:36:56,546 INFO [RS:0;40c018648b21:40467 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T20:36:56,547 INFO [RS:0;40c018648b21:40467 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T20:36:56,547 INFO [RS:0;40c018648b21:40467 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:56,547 DEBUG [RS:0;40c018648b21:40467 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:56,547 DEBUG [RS:0;40c018648b21:40467 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:56,547 DEBUG [RS:0;40c018648b21:40467 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:56,547 DEBUG [RS:0;40c018648b21:40467 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:56,547 DEBUG [RS:0;40c018648b21:40467 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:56,547 DEBUG [RS:0;40c018648b21:40467 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/40c018648b21:0, corePoolSize=2, maxPoolSize=2 2024-11-16T20:36:56,547 DEBUG [RS:0;40c018648b21:40467 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:56,547 DEBUG [RS:0;40c018648b21:40467 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:56,547 DEBUG [RS:0;40c018648b21:40467 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:56,547 DEBUG [RS:0;40c018648b21:40467 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:56,548 DEBUG [RS:0;40c018648b21:40467 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:56,548 DEBUG [RS:0;40c018648b21:40467 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:36:56,548 DEBUG [RS:0;40c018648b21:40467 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/40c018648b21:0, corePoolSize=3, maxPoolSize=3 2024-11-16T20:36:56,548 DEBUG [RS:0;40c018648b21:40467 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0, corePoolSize=3, maxPoolSize=3 2024-11-16T20:36:56,549 INFO [RS:0;40c018648b21:40467 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-16T20:36:56,549 INFO [RS:0;40c018648b21:40467 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:56,549 INFO [RS:0;40c018648b21:40467 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:56,549 INFO [RS:0;40c018648b21:40467 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:56,549 INFO [RS:0;40c018648b21:40467 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:56,549 INFO [RS:0;40c018648b21:40467 {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,40467,1731789416066-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T20:36:56,568 INFO [RS:0;40c018648b21:40467 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T20:36:56,568 INFO [RS:0;40c018648b21:40467 {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,40467,1731789416066-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:56,568 INFO [RS:0;40c018648b21:40467 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:56,568 INFO [RS:0;40c018648b21:40467 {}] regionserver.Replication(171): 40c018648b21,40467,1731789416066 started 2024-11-16T20:36:56,583 INFO [RS:0;40c018648b21:40467 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:56,583 INFO [RS:0;40c018648b21:40467 {}] regionserver.HRegionServer(1482): Serving as 40c018648b21,40467,1731789416066, RpcServer on 40c018648b21/172.17.0.2:40467, sessionid=0x101455d615e0001 2024-11-16T20:36:56,583 DEBUG [RS:0;40c018648b21:40467 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T20:36:56,583 DEBUG [RS:0;40c018648b21:40467 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 40c018648b21,40467,1731789416066 2024-11-16T20:36:56,583 DEBUG [RS:0;40c018648b21:40467 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '40c018648b21,40467,1731789416066' 2024-11-16T20:36:56,583 DEBUG [RS:0;40c018648b21:40467 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T20:36:56,584 DEBUG [RS:0;40c018648b21:40467 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T20:36:56,584 DEBUG [RS:0;40c018648b21:40467 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T20:36:56,584 DEBUG [RS:0;40c018648b21:40467 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T20:36:56,584 DEBUG [RS:0;40c018648b21:40467 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 40c018648b21,40467,1731789416066 2024-11-16T20:36:56,584 DEBUG [RS:0;40c018648b21:40467 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '40c018648b21,40467,1731789416066' 2024-11-16T20:36:56,585 DEBUG [RS:0;40c018648b21:40467 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T20:36:56,585 DEBUG 
[RS:0;40c018648b21:40467 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T20:36:56,585 DEBUG [RS:0;40c018648b21:40467 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T20:36:56,585 INFO [RS:0;40c018648b21:40467 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T20:36:56,585 INFO [RS:0;40c018648b21:40467 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T20:36:56,625 WARN [40c018648b21:45571 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-16T20:36:56,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:56,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:56,689 INFO [RS:0;40c018648b21:40467 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C40467%2C1731789416066, suffix=, logDir=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/WALs/40c018648b21,40467,1731789416066, archiveDir=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/oldWALs, maxLogs=32 2024-11-16T20:36:56,690 INFO [RS:0;40c018648b21:40467 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C40467%2C1731789416066.1731789416690 2024-11-16T20:36:56,699 INFO [RS:0;40c018648b21:40467 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/WALs/40c018648b21,40467,1731789416066/40c018648b21%2C40467%2C1731789416066.1731789416690 2024-11-16T20:36:56,700 DEBUG [RS:0;40c018648b21:40467 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45543:45543),(127.0.0.1/127.0.0.1:37541:37541)] 2024-11-16T20:36:56,875 DEBUG [40c018648b21:45571 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T20:36:56,877 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=40c018648b21,40467,1731789416066 2024-11-16T20:36:56,880 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 40c018648b21,40467,1731789416066, state=OPENING 2024-11-16T20:36:56,940 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T20:36:56,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:56,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:36:56,952 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T20:36:56,952 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:36:56,952 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:36:56,952 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=40c018648b21,40467,1731789416066}] 2024-11-16T20:36:57,109 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T20:36:57,113 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33789, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T20:36:57,120 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T20:36:57,120 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:36:57,124 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C40467%2C1731789416066.meta, suffix=.meta, logDir=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/WALs/40c018648b21,40467,1731789416066, archiveDir=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/oldWALs, maxLogs=32 2024-11-16T20:36:57,125 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C40467%2C1731789416066.meta.1731789417125.meta 2024-11-16T20:36:57,131 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/WALs/40c018648b21,40467,1731789416066/40c018648b21%2C40467%2C1731789416066.meta.1731789417125.meta 2024-11-16T20:36:57,136 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37541:37541),(127.0.0.1/127.0.0.1:45543:45543)] 2024-11-16T20:36:57,140 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 
2024-11-16T20:36:57,141 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T20:36:57,141 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T20:36:57,141 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-16T20:36:57,141 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T20:36:57,141 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:36:57,141 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T20:36:57,141 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T20:36:57,143 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T20:36:57,144 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T20:36:57,144 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:57,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:57,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T20:36:57,145 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T20:36:57,145 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:57,145 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:57,145 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T20:36:57,146 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T20:36:57,146 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:57,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:57,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T20:36:57,147 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T20:36:57,147 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:57,147 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:36:57,148 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T20:36:57,148 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740 2024-11-16T20:36:57,149 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740 2024-11-16T20:36:57,151 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T20:36:57,151 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T20:36:57,151 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-16T20:36:57,153 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T20:36:57,153 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=768042, jitterRate=-0.023385122418403625}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T20:36:57,154 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T20:36:57,154 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731789417141Writing region info on filesystem at 1731789417141Initializing all the Stores at 1731789417142 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789417142Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789417143 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789417143Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789417143Cleaning up temporary data from old regions at 1731789417151 (+8 ms)Running coprocessor post-open hooks at 1731789417154 (+3 ms)Region opened successfully at 1731789417154 2024-11-16T20:36:57,155 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731789417109 2024-11-16T20:36:57,158 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T20:36:57,158 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T20:36:57,159 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=40c018648b21,40467,1731789416066 2024-11-16T20:36:57,160 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 40c018648b21,40467,1731789416066, state=OPEN 2024-11-16T20:36:57,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T20:36:57,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T20:36:57,200 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=40c018648b21,40467,1731789416066 2024-11-16T20:36:57,200 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:36:57,200 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:36:57,203 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T20:36:57,203 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=40c018648b21,40467,1731789416066 in 248 msec 2024-11-16T20:36:57,206 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T20:36:57,206 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 731 msec 2024-11-16T20:36:57,207 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:36:57,207 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T20:36:57,208 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T20:36:57,208 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=40c018648b21,40467,1731789416066, seqNum=-1] 2024-11-16T20:36:57,208 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T20:36:57,210 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60773, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T20:36:57,216 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 808 msec 2024-11-16T20:36:57,217 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731789417217, completionTime=-1 2024-11-16T20:36:57,217 INFO 
[master/40c018648b21:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T20:36:57,217 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T20:36:57,219 INFO [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T20:36:57,219 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731789477219 2024-11-16T20:36:57,219 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731789537219 2024-11-16T20:36:57,219 INFO [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-16T20:36:57,219 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,45571,1731789415883-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:57,220 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,45571,1731789415883-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:57,220 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,45571,1731789415883-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:57,220 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-40c018648b21:45571, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:57,220 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:57,220 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:57,222 DEBUG [master/40c018648b21:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T20:36:57,224 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.095sec 2024-11-16T20:36:57,224 INFO [master/40c018648b21:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T20:36:57,224 INFO [master/40c018648b21:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T20:36:57,224 INFO [master/40c018648b21:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T20:36:57,224 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-16T20:36:57,224 INFO [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T20:36:57,224 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,45571,1731789415883-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T20:36:57,224 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,45571,1731789415883-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T20:36:57,227 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T20:36:57,227 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T20:36:57,227 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,45571,1731789415883-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:36:57,289 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@380e5c32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:36:57,289 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 40c018648b21,45571,-1 for getting cluster id 2024-11-16T20:36:57,290 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T20:36:57,292 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f7d3b93b-d876-4fc0-8014-2dea252fd696' 2024-11-16T20:36:57,292 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T20:36:57,292 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f7d3b93b-d876-4fc0-8014-2dea252fd696" 2024-11-16T20:36:57,293 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66106cb4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:36:57,293 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [40c018648b21,45571,-1] 2024-11-16T20:36:57,293 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T20:36:57,293 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:36:57,295 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39180, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T20:36:57,296 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@680fc8dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:36:57,296 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T20:36:57,298 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=40c018648b21,40467,1731789416066, seqNum=-1] 2024-11-16T20:36:57,298 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T20:36:57,299 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36246, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T20:36:57,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=40c018648b21,45571,1731789415883 2024-11-16T20:36:57,301 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:36:57,305 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T20:36:57,305 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T20:36:57,307 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 40c018648b21,45571,1731789415883 2024-11-16T20:36:57,307 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2a113252 2024-11-16T20:36:57,307 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T20:36:57,308 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39184, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T20:36:57,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T20:36:57,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-16T20:36:57,309 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T20:36:57,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T20:36:57,312 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T20:36:57,312 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:57,312 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-16T20:36:57,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T20:36:57,313 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T20:36:57,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741835_1011 (size=405) 2024-11-16T20:36:57,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741835_1011 (size=405) 2024-11-16T20:36:57,323 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 629f515851bf451fc77b9044611926ab, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36 2024-11-16T20:36:57,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741836_1012 (size=88) 2024-11-16T20:36:57,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42185 is added to blk_1073741836_1012 (size=88) 2024-11-16T20:36:57,330 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:36:57,330 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 629f515851bf451fc77b9044611926ab, disabling compactions & flushes 2024-11-16T20:36:57,330 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. 2024-11-16T20:36:57,330 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. 2024-11-16T20:36:57,330 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. after waiting 0 ms 2024-11-16T20:36:57,330 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. 2024-11-16T20:36:57,330 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. 2024-11-16T20:36:57,330 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 629f515851bf451fc77b9044611926ab: Waiting for close lock at 1731789417330Disabling compacts and flushes for region at 1731789417330Disabling writes for close at 1731789417330Writing region close event to WAL at 1731789417330Closed at 1731789417330 2024-11-16T20:36:57,332 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T20:36:57,332 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731789417332"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731789417332"}]},"ts":"1731789417332"} 2024-11-16T20:36:57,335 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-16T20:36:57,336 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T20:36:57,336 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731789417336"}]},"ts":"1731789417336"} 2024-11-16T20:36:57,338 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-16T20:36:57,339 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=629f515851bf451fc77b9044611926ab, ASSIGN}] 2024-11-16T20:36:57,340 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=629f515851bf451fc77b9044611926ab, ASSIGN 2024-11-16T20:36:57,341 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=629f515851bf451fc77b9044611926ab, ASSIGN; state=OFFLINE, location=40c018648b21,40467,1731789416066; forceNewPlan=false, retain=false 2024-11-16T20:36:57,492 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=629f515851bf451fc77b9044611926ab, regionState=OPENING, regionLocation=40c018648b21,40467,1731789416066 2024-11-16T20:36:57,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=629f515851bf451fc77b9044611926ab, ASSIGN because future has completed 2024-11-16T20:36:57,495 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 629f515851bf451fc77b9044611926ab, server=40c018648b21,40467,1731789416066}] 2024-11-16T20:36:57,652 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. 
2024-11-16T20:36:57,652 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 629f515851bf451fc77b9044611926ab, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab.', STARTKEY => '', ENDKEY => ''} 2024-11-16T20:36:57,653 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 629f515851bf451fc77b9044611926ab 2024-11-16T20:36:57,653 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:36:57,653 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 629f515851bf451fc77b9044611926ab 2024-11-16T20:36:57,653 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 629f515851bf451fc77b9044611926ab 2024-11-16T20:36:57,654 INFO [StoreOpener-629f515851bf451fc77b9044611926ab-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 629f515851bf451fc77b9044611926ab 2024-11-16T20:36:57,655 INFO [StoreOpener-629f515851bf451fc77b9044611926ab-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 629f515851bf451fc77b9044611926ab columnFamilyName info 2024-11-16T20:36:57,656 DEBUG [StoreOpener-629f515851bf451fc77b9044611926ab-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:36:57,656 INFO [StoreOpener-629f515851bf451fc77b9044611926ab-1 {}] regionserver.HStore(327): Store=629f515851bf451fc77b9044611926ab/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:36:57,656 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 629f515851bf451fc77b9044611926ab 2024-11-16T20:36:57,657 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab 2024-11-16T20:36:57,657 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab 2024-11-16T20:36:57,657 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 629f515851bf451fc77b9044611926ab 2024-11-16T20:36:57,657 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 629f515851bf451fc77b9044611926ab 2024-11-16T20:36:57,659 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 629f515851bf451fc77b9044611926ab 2024-11-16T20:36:57,661 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:36:57,661 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 629f515851bf451fc77b9044611926ab; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766587, jitterRate=-0.025235086679458618}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T20:36:57,661 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 629f515851bf451fc77b9044611926ab 2024-11-16T20:36:57,662 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 629f515851bf451fc77b9044611926ab: Running coprocessor pre-open hook at 1731789417653Writing region info on filesystem at 1731789417653Initializing all the Stores at 1731789417654 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789417654Cleaning up temporary data from old regions at 1731789417657 (+3 ms)Running coprocessor post-open hooks at 1731789417661 (+4 ms)Region opened successfully at 1731789417662 (+1 ms) 2024-11-16T20:36:57,663 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab., pid=6, masterSystemTime=1731789417648 2024-11-16T20:36:57,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:57,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:36:57,666 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. 2024-11-16T20:36:57,666 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. 2024-11-16T20:36:57,666 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=629f515851bf451fc77b9044611926ab, regionState=OPEN, openSeqNum=2, regionLocation=40c018648b21,40467,1731789416066 2024-11-16T20:36:57,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 629f515851bf451fc77b9044611926ab, server=40c018648b21,40467,1731789416066 because future has completed 2024-11-16T20:36:57,673 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T20:36:57,673 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 629f515851bf451fc77b9044611926ab, server=40c018648b21,40467,1731789416066 in 175 msec 2024-11-16T20:36:57,676 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T20:36:57,676 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=629f515851bf451fc77b9044611926ab, ASSIGN in 334 msec 2024-11-16T20:36:57,677 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T20:36:57,678 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731789417677"}]},"ts":"1731789417677"} 2024-11-16T20:36:57,680 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated 
tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta
2024-11-16T20:36:57,682 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION
2024-11-16T20:36:57,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 373 msec
2024-11-16T20:37:00,647 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-16T20:37:00,649 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:02,544 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-16T20:37:02,545 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling'
2024-11-16T20:37:05,143 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-16T20:37:05,143 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-16T20:37:05,144 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-16T20:37:05,144 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-16T20:37:05,144 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-16T20:37:05,144 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-16T20:37:05,145 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T20:37:05,145 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-16T20:37:07,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-16T20:37:07,345 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-16T20:37:07,345 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-16T20:37:07,350 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T20:37:07,350 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab.
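The recurring "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings in this section come from the WAL close path: RecoverLeaseFSUtils reflectively polls DistributedFileSystem.isFileClosed on a DFSClient that has already been shut down, and the timestamps show the probe being retried roughly once per second without ever succeeding. The failing WAL paths point at localhost:33297 while the active cluster writes to localhost:43375, which suggests the closed client belongs to a previously torn-down mini-cluster. As an illustration only (not the HBase implementation; the class and method names here are hypothetical), a lease-recovery poll over the public HDFS client API might look like this:

    // Illustrative sketch only -- not the RecoverLeaseFSUtils code path shown above.
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecoverySketch {
      /** Ask the NameNode to recover the lease on an old WAL, then poll until the file is closed. */
      public static boolean recoverWalLease(Configuration conf, URI hdfs, String wal, long timeoutMs)
          throws Exception {
        FileSystem fs = FileSystem.get(hdfs, conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return true; // nothing to recover on non-HDFS filesystems
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        Path path = new Path(wal);
        long deadline = System.currentTimeMillis() + timeoutMs;
        boolean closed = dfs.recoverLease(path); // triggers lease recovery on the NameNode
        while (!closed && System.currentTimeMillis() < deadline) {
          Thread.sleep(1000L); // the log above shows roughly one attempt per second
          closed = dfs.isFileClosed(path); // throws "Filesystem closed" once the client has been shut down
        }
        return closed;
      }
    }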
2024-11-16T20:37:07,354 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab., hostname=40c018648b21,40467,1731789416066, seqNum=2] 2024-11-16T20:37:07,362 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T20:37:07,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T20:37:07,367 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-16T20:37:07,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-16T20:37:07,368 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-16T20:37:07,370 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-16T20:37:07,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40467 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-16T20:37:07,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. 
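The flush carried out by pid=7/pid=8 above is driven from the test client: it writes one cell to the 'info' family of row0001 and then asks the master to flush the table. A minimal client-side sketch of that sequence using the standard HBase client API follows; the table name, row, and family come from the log, while the qualifier and value are assumptions (the flushed key below shows an empty qualifier, "row0001/info:/").

    // Hypothetical sketch of the client-side put + flush recorded in this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          Put put = new Put(Bytes.toBytes("row0001"));
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), Bytes.toBytes("value0001"));
          table.put(put);   // lands in the region's memstore first
          admin.flush(tn);  // master schedules FlushTableProcedure -> FlushRegionProcedure, as in pid=7/pid=8 above
        }
      }
    }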
2024-11-16T20:37:07,531 INFO [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 629f515851bf451fc77b9044611926ab 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-16T20:37:07,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp/info/bf1d1796c36342eb9368654bbce5840c is 1080, key is row0001/info:/1731789427355/Put/seqid=0
2024-11-16T20:37:07,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741837_1013 (size=6033)
2024-11-16T20:37:07,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741837_1013 (size=6033)
2024-11-16T20:37:07,555 INFO [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp/info/bf1d1796c36342eb9368654bbce5840c
2024-11-16T20:37:07,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp/info/bf1d1796c36342eb9368654bbce5840c as hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/bf1d1796c36342eb9368654bbce5840c
2024-11-16T20:37:07,569 INFO [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/bf1d1796c36342eb9368654bbce5840c, entries=1, sequenceid=5, filesize=5.9 K
2024-11-16T20:37:07,570 INFO [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 629f515851bf451fc77b9044611926ab in 39ms, sequenceid=5, compaction requested=false
2024-11-16T20:37:07,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 629f515851bf451fc77b9044611926ab:
2024-11-16T20:37:07,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab.
2024-11-16T20:37:07,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-16T20:37:07,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-16T20:37:07,579 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-16T20:37:07,579 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 206 msec 2024-11-16T20:37:07,582 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 217 msec 2024-11-16T20:37:07,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:07,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:08,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:08,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:09,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:09,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:10,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:10,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:11,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:11,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:12,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:12,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:13,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:13,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:14,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:14,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:15,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:15,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:16,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:16,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:16,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta after 68048ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T20:37:16,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 after 68057ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T20:37:17,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-16T20:37:17,443 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-16T20:37:17,446 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T20:37:17,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T20:37:17,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-16T20:37:17,449 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-16T20:37:17,450 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-16T20:37:17,450 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-16T20:37:17,603 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40467 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-16T20:37:17,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab.
2024-11-16T20:37:17,604 INFO [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 629f515851bf451fc77b9044611926ab 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T20:37:17,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp/info/d579659a13954a8ab98bc40fcba24b99 is 1080, key is row0002/info:/1731789437445/Put/seqid=0 2024-11-16T20:37:17,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741838_1014 (size=6033) 2024-11-16T20:37:17,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741838_1014 (size=6033) 2024-11-16T20:37:17,618 INFO [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp/info/d579659a13954a8ab98bc40fcba24b99 2024-11-16T20:37:17,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp/info/d579659a13954a8ab98bc40fcba24b99 as hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/d579659a13954a8ab98bc40fcba24b99 2024-11-16T20:37:17,633 INFO [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/d579659a13954a8ab98bc40fcba24b99, entries=1, sequenceid=9, filesize=5.9 K 2024-11-16T20:37:17,634 INFO [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 629f515851bf451fc77b9044611926ab in 30ms, sequenceid=9, compaction requested=false 2024-11-16T20:37:17,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 629f515851bf451fc77b9044611926ab: 2024-11-16T20:37:17,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. 
2024-11-16T20:37:17,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-16T20:37:17,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-16T20:37:17,640 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-16T20:37:17,640 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec 2024-11-16T20:37:17,642 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 195 msec 2024-11-16T20:37:17,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:17,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:18,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:18,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:19,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:19,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:20,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:20,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:21,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:21,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:22,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:22,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:23,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:23,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:24,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:24,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:25,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:25,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:25,856 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T20:37:26,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:26,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:27,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-16T20:37:27,494 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-16T20:37:27,497 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C40467%2C1731789416066.1731789447497 2024-11-16T20:37:27,502 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:27,502 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:27,502 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:27,502 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:27,503 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:27,503 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/WALs/40c018648b21,40467,1731789416066/40c018648b21%2C40467%2C1731789416066.1731789416690 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/WALs/40c018648b21,40467,1731789416066/40c018648b21%2C40467%2C1731789416066.1731789447497 2024-11-16T20:37:27,503 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45543:45543),(127.0.0.1/127.0.0.1:37541:37541)] 2024-11-16T20:37:27,504 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/WALs/40c018648b21,40467,1731789416066/40c018648b21%2C40467%2C1731789416066.1731789416690 is not closed yet, will try archiving it next time 2024-11-16T20:37:27,504 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T20:37:27,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741833_1009 (size=5546) 2024-11-16T20:37:27,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741833_1009 (size=5546) 2024-11-16T20:37:27,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T20:37:27,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-16T20:37:27,507 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-16T20:37:27,508 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-11-16T20:37:27,508 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-16T20:37:27,662 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40467 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-16T20:37:27,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. 2024-11-16T20:37:27,663 INFO [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 629f515851bf451fc77b9044611926ab 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T20:37:27,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp/info/fcbbf53016d343bf99e53c03ad0ebe13 is 1080, key is row0003/info:/1731789447495/Put/seqid=0 2024-11-16T20:37:27,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741840_1016 (size=6033) 2024-11-16T20:37:27,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741840_1016 (size=6033) 2024-11-16T20:37:27,675 INFO [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp/info/fcbbf53016d343bf99e53c03ad0ebe13 2024-11-16T20:37:27,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp/info/fcbbf53016d343bf99e53c03ad0ebe13 as hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/fcbbf53016d343bf99e53c03ad0ebe13 2024-11-16T20:37:27,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:27,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:27,690 INFO [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/fcbbf53016d343bf99e53c03ad0ebe13, entries=1, sequenceid=13, filesize=5.9 K 2024-11-16T20:37:27,691 INFO [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 629f515851bf451fc77b9044611926ab in 29ms, sequenceid=13, compaction requested=true 2024-11-16T20:37:27,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 629f515851bf451fc77b9044611926ab: 2024-11-16T20:37:27,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. 2024-11-16T20:37:27,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-16T20:37:27,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-16T20:37:27,695 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-16T20:37:27,695 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 185 msec 2024-11-16T20:37:27,698 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 192 msec 2024-11-16T20:37:28,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:28,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:29,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:29,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:30,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:30,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:31,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:31,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:32,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:32,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:33,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:33,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:34,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:34,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:35,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:35,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:36,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:36,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:37,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-16T20:37:37,614 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-16T20:37:37,614 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T20:37:37,616 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T20:37:37,616 DEBUG [Time-limited test {}] regionserver.HStore(1541): 629f515851bf451fc77b9044611926ab/info is initiating minor compaction (all files) 2024-11-16T20:37:37,616 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T20:37:37,616 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:37,616 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 629f515851bf451fc77b9044611926ab/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. 2024-11-16T20:37:37,616 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/bf1d1796c36342eb9368654bbce5840c, hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/d579659a13954a8ab98bc40fcba24b99, hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/fcbbf53016d343bf99e53c03ad0ebe13] into tmpdir=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp, totalSize=17.7 K 2024-11-16T20:37:37,617 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting bf1d1796c36342eb9368654bbce5840c, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731789427355 2024-11-16T20:37:37,618 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting d579659a13954a8ab98bc40fcba24b99, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731789437445 2024-11-16T20:37:37,618 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting fcbbf53016d343bf99e53c03ad0ebe13, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731789447495 2024-11-16T20:37:37,633 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 629f515851bf451fc77b9044611926ab#info#compaction#45 average throughput is unlimited, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T20:37:37,634 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp/info/0a9a7d80e32f41a49b1912b95770791c is 1080, key is row0001/info:/1731789427355/Put/seqid=0 2024-11-16T20:37:37,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741841_1017 (size=8296) 2024-11-16T20:37:37,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741841_1017 (size=8296) 2024-11-16T20:37:37,647 INFO [master/40c018648b21:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T20:37:37,647 INFO [master/40c018648b21:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-16T20:37:37,648 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp/info/0a9a7d80e32f41a49b1912b95770791c as hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/0a9a7d80e32f41a49b1912b95770791c 2024-11-16T20:37:37,655 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 629f515851bf451fc77b9044611926ab/info of 629f515851bf451fc77b9044611926ab into 0a9a7d80e32f41a49b1912b95770791c(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
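[Editor's note] At this point the log shows ExploringCompactionPolicy selecting all three flushed store files (about 17.7 K total) in 629f515851bf451fc77b9044611926ab/info and rewriting them into a single 8.1 K file. A compaction of the same store can also be requested explicitly through the Admin API; the following sketch is illustrative only, under the same connection assumptions as the earlier example, and uses the column family name "info" taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactTableExample {
    public static void main(String[] args) throws Exception {
        // Assumes cluster connection details come from hbase-site.xml / defaults.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
            // Request a compaction of the 'info' column family; the region server's
            // compaction policy still decides which store files are actually included.
            admin.compact(table, Bytes.toBytes("info"));
        }
    }
}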
2024-11-16T20:37:37,655 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 629f515851bf451fc77b9044611926ab: 2024-11-16T20:37:37,658 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C40467%2C1731789416066.1731789457657 2024-11-16T20:37:37,663 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:37,664 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:37,664 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:37,664 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:37,664 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:37,664 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/WALs/40c018648b21,40467,1731789416066/40c018648b21%2C40467%2C1731789416066.1731789447497 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/WALs/40c018648b21,40467,1731789416066/40c018648b21%2C40467%2C1731789416066.1731789457657 2024-11-16T20:37:37,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741839_1015 (size=2520) 2024-11-16T20:37:37,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741839_1015 (size=2520) 2024-11-16T20:37:37,668 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45543:45543),(127.0.0.1/127.0.0.1:37541:37541)] 2024-11-16T20:37:37,668 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/WALs/40c018648b21,40467,1731789416066/40c018648b21%2C40467%2C1731789416066.1731789416690 to hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/oldWALs/40c018648b21%2C40467%2C1731789416066.1731789416690 2024-11-16T20:37:37,669 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T20:37:37,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T20:37:37,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-16T20:37:37,672 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-16T20:37:37,673 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-16T20:37:37,673 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-16T20:37:37,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:37,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-16T20:37:37,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40467 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-16T20:37:37,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab.
2024-11-16T20:37:37,826 INFO [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 629f515851bf451fc77b9044611926ab 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-16T20:37:37,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp/info/78999ddb6cd64b6a9adfe319461eaab9 is 1080, key is row0000/info:/1731789457656/Put/seqid=0
2024-11-16T20:37:37,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741843_1019 (size=6033)
2024-11-16T20:37:37,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741843_1019 (size=6033)
2024-11-16T20:37:37,839 INFO [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp/info/78999ddb6cd64b6a9adfe319461eaab9
2024-11-16T20:37:37,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp/info/78999ddb6cd64b6a9adfe319461eaab9 as hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/78999ddb6cd64b6a9adfe319461eaab9
2024-11-16T20:37:37,853 INFO [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/78999ddb6cd64b6a9adfe319461eaab9, entries=1, sequenceid=18, filesize=5.9 K
2024-11-16T20:37:37,854 INFO [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 629f515851bf451fc77b9044611926ab in 29ms, sequenceid=18, compaction requested=false
2024-11-16T20:37:37,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 629f515851bf451fc77b9044611926ab:
2024-11-16T20:37:37,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab.
2024-11-16T20:37:37,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-11-16T20:37:37,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-11-16T20:37:37,859 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-16T20:37:37,859 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 183 msec
2024-11-16T20:37:37,861 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 191 msec
2024-11-16T20:37:38,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:38,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:39,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:39,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:40,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:40,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:41,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:41,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:42,653 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 629f515851bf451fc77b9044611926ab, had cached 0 bytes from a total of 14329 2024-11-16T20:37:42,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:42,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:43,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:43,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:44,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:44,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:45,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:45,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:46,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:46,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:47,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:47,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-16T20:37:47,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-16T20:37:47,754 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-16T20:37:47,758 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C40467%2C1731789416066.1731789467758
2024-11-16T20:37:47,765 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T20:37:47,766 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T20:37:47,766 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T20:37:47,766 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T20:37:47,766 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T20:37:47,766 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/WALs/40c018648b21,40467,1731789416066/40c018648b21%2C40467%2C1731789416066.1731789457657 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/WALs/40c018648b21,40467,1731789416066/40c018648b21%2C40467%2C1731789416066.1731789467758
2024-11-16T20:37:47,767 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37541:37541),(127.0.0.1/127.0.0.1:45543:45543)]
2024-11-16T20:37:47,767 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/WALs/40c018648b21,40467,1731789416066/40c018648b21%2C40467%2C1731789416066.1731789457657 is not closed yet, will try archiving it next time
2024-11-16T20:37:47,767 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-16T20:37:47,767 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/WALs/40c018648b21,40467,1731789416066/40c018648b21%2C40467%2C1731789416066.1731789447497 to hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/oldWALs/40c018648b21%2C40467%2C1731789416066.1731789447497
2024-11-16T20:37:47,768 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-16T20:37:47,768 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:37:47,768 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:37:47,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741842_1018 (size=2026) 2024-11-16T20:37:47,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741842_1018 (size=2026) 
2024-11-16T20:37:47,773 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:37:47,773 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T20:37:47,773 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T20:37:47,773 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1696646448, stopped=false 2024-11-16T20:37:47,773 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=40c018648b21,45571,1731789415883 2024-11-16T20:37:47,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T20:37:47,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T20:37:47,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:37:47,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:37:47,831 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T20:37:47,831 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T20:37:47,831 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:37:47,831 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:37:47,831 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:37:47,832 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '40c018648b21,40467,1731789416066' ***** 2024-11-16T20:37:47,832 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T20:37:47,832 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:37:47,832 INFO [RS:0;40c018648b21:40467 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T20:37:47,832 INFO [RS:0;40c018648b21:40467 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T20:37:47,832 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T20:37:47,832 INFO [RS:0;40c018648b21:40467 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T20:37:47,832 INFO [RS:0;40c018648b21:40467 {}] regionserver.HRegionServer(3091): Received CLOSE for 629f515851bf451fc77b9044611926ab 2024-11-16T20:37:47,833 INFO [RS:0;40c018648b21:40467 {}] regionserver.HRegionServer(959): stopping server 40c018648b21,40467,1731789416066 2024-11-16T20:37:47,833 INFO [RS:0;40c018648b21:40467 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T20:37:47,833 INFO [RS:0;40c018648b21:40467 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;40c018648b21:40467. 2024-11-16T20:37:47,833 DEBUG [RS:0;40c018648b21:40467 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:37:47,833 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 629f515851bf451fc77b9044611926ab, disabling compactions & flushes 2024-11-16T20:37:47,833 DEBUG [RS:0;40c018648b21:40467 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:37:47,833 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. 2024-11-16T20:37:47,833 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. 2024-11-16T20:37:47,833 INFO [RS:0;40c018648b21:40467 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T20:37:47,833 INFO [RS:0;40c018648b21:40467 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T20:37:47,833 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. after waiting 0 ms 2024-11-16T20:37:47,833 INFO [RS:0;40c018648b21:40467 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T20:37:47,833 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. 2024-11-16T20:37:47,833 INFO [RS:0;40c018648b21:40467 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T20:37:47,833 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 629f515851bf451fc77b9044611926ab 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T20:37:47,834 INFO [RS:0;40c018648b21:40467 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T20:37:47,834 DEBUG [RS:0;40c018648b21:40467 {}] regionserver.HRegionServer(1325): Online Regions={629f515851bf451fc77b9044611926ab=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab., 1588230740=hbase:meta,,1.1588230740} 2024-11-16T20:37:47,834 DEBUG [RS:0;40c018648b21:40467 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 629f515851bf451fc77b9044611926ab 2024-11-16T20:37:47,834 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T20:37:47,834 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T20:37:47,834 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T20:37:47,834 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T20:37:47,834 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T20:37:47,834 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-16T20:37:47,840 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 
{event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp/info/5665d3a15ad74a9ca853513068dec4fa is 1080, key is row0001/info:/1731789467756/Put/seqid=0 2024-11-16T20:37:47,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741845_1021 (size=6033) 2024-11-16T20:37:47,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741845_1021 (size=6033) 2024-11-16T20:37:47,846 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp/info/5665d3a15ad74a9ca853513068dec4fa 2024-11-16T20:37:47,852 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/.tmp/info/5665d3a15ad74a9ca853513068dec4fa as hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/5665d3a15ad74a9ca853513068dec4fa 2024-11-16T20:37:47,855 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740/.tmp/info/761ee27850c34111b80c17f96ed73239 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab./info:regioninfo/1731789417666/Put/seqid=0 2024-11-16T20:37:47,858 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/5665d3a15ad74a9ca853513068dec4fa, entries=1, sequenceid=22, filesize=5.9 K 2024-11-16T20:37:47,859 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 629f515851bf451fc77b9044611926ab in 26ms, sequenceid=22, compaction requested=true 2024-11-16T20:37:47,871 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/bf1d1796c36342eb9368654bbce5840c, 
hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/d579659a13954a8ab98bc40fcba24b99, hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/fcbbf53016d343bf99e53c03ad0ebe13] to archive 2024-11-16T20:37:47,872 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-16T20:37:47,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741846_1022 (size=7308) 2024-11-16T20:37:47,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741846_1022 (size=7308) 2024-11-16T20:37:47,874 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/bf1d1796c36342eb9368654bbce5840c to hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/bf1d1796c36342eb9368654bbce5840c 2024-11-16T20:37:47,874 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740/.tmp/info/761ee27850c34111b80c17f96ed73239 2024-11-16T20:37:47,875 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/d579659a13954a8ab98bc40fcba24b99 to hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/d579659a13954a8ab98bc40fcba24b99 2024-11-16T20:37:47,877 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/fcbbf53016d343bf99e53c03ad0ebe13 to hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/info/fcbbf53016d343bf99e53c03ad0ebe13 2024-11-16T20:37:47,877 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab.-1 {}] 
regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=40c018648b21:45571 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-16T20:37:47,877 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [bf1d1796c36342eb9368654bbce5840c=6033, d579659a13954a8ab98bc40fcba24b99=6033, fcbbf53016d343bf99e53c03ad0ebe13=6033] 2024-11-16T20:37:47,884 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/629f515851bf451fc77b9044611926ab/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-16T20:37:47,885 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. 2024-11-16T20:37:47,885 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 629f515851bf451fc77b9044611926ab: Waiting for close lock at 1731789467833Running coprocessor pre-close hooks at 1731789467833Disabling compacts and flushes for region at 1731789467833Disabling writes for close at 1731789467833Obtaining lock to block concurrent updates at 1731789467833Preparing flush snapshotting stores in 629f515851bf451fc77b9044611926ab at 1731789467833Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731789467834 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. at 1731789467835 (+1 ms)Flushing 629f515851bf451fc77b9044611926ab/info: creating writer at 1731789467835Flushing 629f515851bf451fc77b9044611926ab/info: appending metadata at 1731789467839 (+4 ms)Flushing 629f515851bf451fc77b9044611926ab/info: closing flushed file at 1731789467839Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@660e76f2: reopening flushed file at 1731789467851 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 629f515851bf451fc77b9044611926ab in 26ms, sequenceid=22, compaction requested=true at 1731789467859 (+8 ms)Writing region close event to WAL at 1731789467878 (+19 ms)Running coprocessor post-close hooks at 1731789467885 (+7 ms)Closed at 1731789467885 2024-11-16T20:37:47,885 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731789417309.629f515851bf451fc77b9044611926ab. 
2024-11-16T20:37:47,908 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740/.tmp/ns/8e666379828a46cab913cfea4a3c5db5 is 43, key is default/ns:d/1731789417211/Put/seqid=0 2024-11-16T20:37:47,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741847_1023 (size=5153) 2024-11-16T20:37:47,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741847_1023 (size=5153) 2024-11-16T20:37:47,913 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740/.tmp/ns/8e666379828a46cab913cfea4a3c5db5 2024-11-16T20:37:47,933 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740/.tmp/table/baace4f9e74142aab05d4d5b3d3bc6e1 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731789417677/Put/seqid=0 2024-11-16T20:37:47,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741848_1024 (size=5508) 2024-11-16T20:37:47,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741848_1024 (size=5508) 2024-11-16T20:37:47,938 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740/.tmp/table/baace4f9e74142aab05d4d5b3d3bc6e1 2024-11-16T20:37:47,945 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740/.tmp/info/761ee27850c34111b80c17f96ed73239 as hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740/info/761ee27850c34111b80c17f96ed73239 2024-11-16T20:37:47,951 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740/info/761ee27850c34111b80c17f96ed73239, entries=10, sequenceid=11, filesize=7.1 K 2024-11-16T20:37:47,952 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740/.tmp/ns/8e666379828a46cab913cfea4a3c5db5 as hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740/ns/8e666379828a46cab913cfea4a3c5db5 2024-11-16T20:37:47,958 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740/ns/8e666379828a46cab913cfea4a3c5db5, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T20:37:47,959 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740/.tmp/table/baace4f9e74142aab05d4d5b3d3bc6e1 as hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740/table/baace4f9e74142aab05d4d5b3d3bc6e1 2024-11-16T20:37:47,965 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740/table/baace4f9e74142aab05d4d5b3d3bc6e1, entries=2, sequenceid=11, filesize=5.4 K 2024-11-16T20:37:47,967 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, compaction requested=false 2024-11-16T20:37:47,971 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T20:37:47,971 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T20:37:47,972 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T20:37:47,972 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731789467834Running coprocessor pre-close hooks at 1731789467834Disabling compacts and flushes for region at 1731789467834Disabling writes for close at 1731789467834Obtaining lock to block concurrent updates at 1731789467834Preparing flush snapshotting stores in 1588230740 at 1731789467834Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731789467835 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731789467835Flushing 1588230740/info: creating writer at 1731789467835Flushing 1588230740/info: appending metadata at 1731789467855 (+20 ms)Flushing 1588230740/info: closing flushed file at 1731789467855Flushing 1588230740/ns: creating writer at 1731789467880 (+25 ms)Flushing 1588230740/ns: appending metadata at 1731789467908 (+28 ms)Flushing 1588230740/ns: closing flushed file at 1731789467908Flushing 1588230740/table: creating writer at 1731789467919 (+11 ms)Flushing 1588230740/table: appending metadata at 1731789467933 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731789467933Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f293a1c: reopening flushed file at 1731789467944 (+11 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@21f70fdf: reopening flushed file at 1731789467951 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@273d4f30: reopening flushed file at 1731789467958 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, compaction requested=false at 1731789467967 (+9 ms)Writing region close event to WAL at 1731789467968 (+1 ms)Running coprocessor post-close hooks at 1731789467971 (+3 ms)Closed at 1731789467971 2024-11-16T20:37:47,972 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T20:37:48,034 INFO [RS:0;40c018648b21:40467 {}] regionserver.HRegionServer(976): stopping server 40c018648b21,40467,1731789416066; all regions closed. 2024-11-16T20:37:48,035 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:48,035 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:48,035 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:48,035 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:48,035 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:48,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741834_1010 (size=3306) 2024-11-16T20:37:48,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741834_1010 (size=3306) 2024-11-16T20:37:48,039 DEBUG [RS:0;40c018648b21:40467 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/oldWALs 2024-11-16T20:37:48,039 INFO [RS:0;40c018648b21:40467 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 40c018648b21%2C40467%2C1731789416066.meta:.meta(num 1731789417125) 2024-11-16T20:37:48,040 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:48,040 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:48,040 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:48,040 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:48,040 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:48,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741844_1020 (size=1252) 2024-11-16T20:37:48,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741844_1020 (size=1252) 2024-11-16T20:37:48,045 DEBUG [RS:0;40c018648b21:40467 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/oldWALs 2024-11-16T20:37:48,045 INFO [RS:0;40c018648b21:40467 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 40c018648b21%2C40467%2C1731789416066:(num 1731789467758) 2024-11-16T20:37:48,045 DEBUG [RS:0;40c018648b21:40467 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:37:48,045 INFO [RS:0;40c018648b21:40467 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T20:37:48,045 INFO [RS:0;40c018648b21:40467 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T20:37:48,045 INFO [RS:0;40c018648b21:40467 {}] hbase.ChoreService(370): Chore service for: regionserver/40c018648b21:0 had 
[ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T20:37:48,045 INFO [RS:0;40c018648b21:40467 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T20:37:48,045 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T20:37:48,045 INFO [RS:0;40c018648b21:40467 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40467 2024-11-16T20:37:48,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/40c018648b21,40467,1731789416066 2024-11-16T20:37:48,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T20:37:48,073 INFO [RS:0;40c018648b21:40467 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T20:37:48,083 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [40c018648b21,40467,1731789416066] 2024-11-16T20:37:48,094 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/40c018648b21,40467,1731789416066 already deleted, retry=false 2024-11-16T20:37:48,094 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 40c018648b21,40467,1731789416066 expired; onlineServers=0 2024-11-16T20:37:48,094 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '40c018648b21,45571,1731789415883' ***** 2024-11-16T20:37:48,094 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T20:37:48,094 INFO [M:0;40c018648b21:45571 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T20:37:48,094 INFO [M:0;40c018648b21:45571 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T20:37:48,094 DEBUG [M:0;40c018648b21:45571 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T20:37:48,094 DEBUG [M:0;40c018648b21:45571 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T20:37:48,094 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T20:37:48,094 DEBUG [master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789416437 {}] cleaner.HFileCleaner(306): Exit Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789416437,5,FailOnTimeoutGroup] 2024-11-16T20:37:48,094 DEBUG [master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789416437 {}] cleaner.HFileCleaner(306): Exit Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789416437,5,FailOnTimeoutGroup] 2024-11-16T20:37:48,094 INFO [M:0;40c018648b21:45571 {}] hbase.ChoreService(370): Chore service for: master/40c018648b21:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T20:37:48,094 INFO [M:0;40c018648b21:45571 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T20:37:48,095 DEBUG [M:0;40c018648b21:45571 {}] master.HMaster(1795): Stopping service threads 2024-11-16T20:37:48,095 INFO [M:0;40c018648b21:45571 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T20:37:48,095 INFO [M:0;40c018648b21:45571 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T20:37:48,095 INFO [M:0;40c018648b21:45571 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T20:37:48,095 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T20:37:48,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T20:37:48,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:37:48,104 DEBUG [M:0;40c018648b21:45571 {}] zookeeper.ZKUtil(347): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T20:37:48,104 WARN [M:0;40c018648b21:45571 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T20:37:48,105 INFO [M:0;40c018648b21:45571 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/.lastflushedseqids 2024-11-16T20:37:48,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741849_1025 (size=130) 2024-11-16T20:37:48,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741849_1025 (size=130) 2024-11-16T20:37:48,126 INFO [M:0;40c018648b21:45571 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T20:37:48,126 INFO [M:0;40c018648b21:45571 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T20:37:48,126 DEBUG [M:0;40c018648b21:45571 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T20:37:48,126 INFO [M:0;40c018648b21:45571 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:37:48,127 DEBUG [M:0;40c018648b21:45571 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:37:48,127 DEBUG [M:0;40c018648b21:45571 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T20:37:48,127 DEBUG [M:0;40c018648b21:45571 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:37:48,127 INFO [M:0;40c018648b21:45571 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.61 KB heapSize=55.02 KB 2024-11-16T20:37:48,144 DEBUG [M:0;40c018648b21:45571 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/433944e1f64241558e192798e58451be is 82, key is hbase:meta,,1/info:regioninfo/1731789417159/Put/seqid=0 2024-11-16T20:37:48,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741850_1026 (size=5672) 2024-11-16T20:37:48,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741850_1026 (size=5672) 2024-11-16T20:37:48,150 INFO [M:0;40c018648b21:45571 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/433944e1f64241558e192798e58451be 2024-11-16T20:37:48,170 DEBUG [M:0;40c018648b21:45571 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f5c9b62aa9f6416eb0f7f42a6bff1d63 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731789417683/Put/seqid=0 2024-11-16T20:37:48,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741851_1027 (size=7825) 2024-11-16T20:37:48,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741851_1027 (size=7825) 2024-11-16T20:37:48,175 INFO [M:0;40c018648b21:45571 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.01 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f5c9b62aa9f6416eb0f7f42a6bff1d63 2024-11-16T20:37:48,181 INFO [M:0;40c018648b21:45571 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f5c9b62aa9f6416eb0f7f42a6bff1d63 2024-11-16T20:37:48,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:37:48,183 INFO [RS:0;40c018648b21:40467 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T20:37:48,183 INFO [RS:0;40c018648b21:40467 {}] 
regionserver.HRegionServer(1031): Exiting; stopping=40c018648b21,40467,1731789416066; zookeeper connection closed. 2024-11-16T20:37:48,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40467-0x101455d615e0001, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:37:48,184 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@758a1d14 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@758a1d14 2024-11-16T20:37:48,184 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T20:37:48,202 DEBUG [M:0;40c018648b21:45571 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2b1d2e43c9a3494281533ab6ac884da4 is 69, key is 40c018648b21,40467,1731789416066/rs:state/1731789416525/Put/seqid=0 2024-11-16T20:37:48,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741852_1028 (size=5156) 2024-11-16T20:37:48,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741852_1028 (size=5156) 2024-11-16T20:37:48,207 INFO [M:0;40c018648b21:45571 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2b1d2e43c9a3494281533ab6ac884da4 2024-11-16T20:37:48,228 DEBUG [M:0;40c018648b21:45571 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5bdda914e976493cb586231efdf65ef1 is 52, key is load_balancer_on/state:d/1731789417304/Put/seqid=0 2024-11-16T20:37:48,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741853_1029 (size=5056) 2024-11-16T20:37:48,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741853_1029 (size=5056) 2024-11-16T20:37:48,234 INFO [M:0;40c018648b21:45571 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5bdda914e976493cb586231efdf65ef1 2024-11-16T20:37:48,240 DEBUG [M:0;40c018648b21:45571 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/433944e1f64241558e192798e58451be as hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/433944e1f64241558e192798e58451be 2024-11-16T20:37:48,246 INFO [M:0;40c018648b21:45571 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/433944e1f64241558e192798e58451be, entries=8, sequenceid=121, filesize=5.5 K 2024-11-16T20:37:48,247 DEBUG [M:0;40c018648b21:45571 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f5c9b62aa9f6416eb0f7f42a6bff1d63 as hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f5c9b62aa9f6416eb0f7f42a6bff1d63 2024-11-16T20:37:48,253 INFO [M:0;40c018648b21:45571 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f5c9b62aa9f6416eb0f7f42a6bff1d63 2024-11-16T20:37:48,253 INFO [M:0;40c018648b21:45571 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f5c9b62aa9f6416eb0f7f42a6bff1d63, entries=14, sequenceid=121, filesize=7.6 K 2024-11-16T20:37:48,254 DEBUG [M:0;40c018648b21:45571 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2b1d2e43c9a3494281533ab6ac884da4 as hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2b1d2e43c9a3494281533ab6ac884da4 2024-11-16T20:37:48,260 INFO [M:0;40c018648b21:45571 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2b1d2e43c9a3494281533ab6ac884da4, entries=1, sequenceid=121, filesize=5.0 K 2024-11-16T20:37:48,261 DEBUG [M:0;40c018648b21:45571 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5bdda914e976493cb586231efdf65ef1 as hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5bdda914e976493cb586231efdf65ef1 2024-11-16T20:37:48,266 INFO [M:0;40c018648b21:45571 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43375/user/jenkins/test-data/dd403c48-1d95-0b91-7bd5-567595ba0a36/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5bdda914e976493cb586231efdf65ef1, entries=1, sequenceid=121, filesize=4.9 K 2024-11-16T20:37:48,268 INFO [M:0;40c018648b21:45571 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.61 KB/44659, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=121, compaction requested=false 2024-11-16T20:37:48,275 INFO [M:0;40c018648b21:45571 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T20:37:48,275 DEBUG [M:0;40c018648b21:45571 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731789468126Disabling compacts and flushes for region at 1731789468126Disabling writes for close at 1731789468127 (+1 ms)Obtaining lock to block concurrent updates at 1731789468127Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731789468127Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44659, getHeapSize=56272, getOffHeapSize=0, getCellsCount=140 at 1731789468127Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731789468128 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731789468128Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731789468144 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731789468144Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731789468155 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731789468169 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731789468169Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731789468181 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731789468201 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731789468201Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731789468213 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731789468228 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731789468228Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75e5fa77: reopening flushed file at 1731789468239 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@eaf9614: reopening flushed file at 1731789468246 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43e481d9: reopening flushed file at 1731789468253 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@791f4590: reopening flushed file at 1731789468260 (+7 ms)Finished flush of dataSize ~43.61 KB/44659, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=121, compaction requested=false at 1731789468268 (+8 ms)Writing region close event to WAL at 1731789468275 (+7 ms)Closed at 1731789468275 2024-11-16T20:37:48,275 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:48,276 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:48,276 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:48,276 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:48,276 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:37:48,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35211 is added to blk_1073741830_1006 (size=53056) 2024-11-16T20:37:48,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42185 is added to blk_1073741830_1006 (size=53056) 2024-11-16T20:37:48,278 INFO [M:0;40c018648b21:45571 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-16T20:37:48,278 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T20:37:48,278 INFO [M:0;40c018648b21:45571 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45571 2024-11-16T20:37:48,279 INFO [M:0;40c018648b21:45571 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T20:37:48,454 INFO [M:0;40c018648b21:45571 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T20:37:48,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:37:48,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45571-0x101455d615e0000, quorum=127.0.0.1:53558, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:37:48,504 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@38b8be0d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:37:48,504 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34534d9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:37:48,505 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:37:48,505 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@757e4aa2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:37:48,505 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@317cfbff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/hadoop.log.dir/,STOPPED} 2024-11-16T20:37:48,506 WARN [BP-1017383234-172.17.0.2-1731789413326 heartbeating to localhost/127.0.0.1:43375 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:37:48,506 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T20:37:48,506 WARN [BP-1017383234-172.17.0.2-1731789413326 heartbeating to localhost/127.0.0.1:43375 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1017383234-172.17.0.2-1731789413326 (Datanode Uuid 739f6543-dfe3-4271-995a-006aaa885773) service to localhost/127.0.0.1:43375 2024-11-16T20:37:48,506 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:37:48,507 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/cluster_2135b74f-5a03-20cb-8b39-1582a9f18290/data/data3/current/BP-1017383234-172.17.0.2-1731789413326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:37:48,507 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/cluster_2135b74f-5a03-20cb-8b39-1582a9f18290/data/data4/current/BP-1017383234-172.17.0.2-1731789413326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:37:48,507 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:37:48,509 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@61e16297{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:37:48,510 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4faae08f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:37:48,510 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:37:48,510 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@678c2527{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:37:48,510 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3dc731d8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/hadoop.log.dir/,STOPPED} 2024-11-16T20:37:48,511 WARN [BP-1017383234-172.17.0.2-1731789413326 heartbeating to localhost/127.0.0.1:43375 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:37:48,511 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T20:37:48,511 WARN [BP-1017383234-172.17.0.2-1731789413326 heartbeating to localhost/127.0.0.1:43375 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1017383234-172.17.0.2-1731789413326 (Datanode Uuid 3ac1ff5c-7770-40b7-930c-2d06b71de9ab) service to localhost/127.0.0.1:43375 2024-11-16T20:37:48,511 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:37:48,512 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/cluster_2135b74f-5a03-20cb-8b39-1582a9f18290/data/data1/current/BP-1017383234-172.17.0.2-1731789413326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:37:48,512 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/cluster_2135b74f-5a03-20cb-8b39-1582a9f18290/data/data2/current/BP-1017383234-172.17.0.2-1731789413326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:37:48,512 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:37:48,519 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39212263{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T20:37:48,519 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20f59884{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:37:48,519 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:37:48,519 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b24fbcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:37:48,519 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ea88c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/hadoop.log.dir/,STOPPED} 2024-11-16T20:37:48,525 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T20:37:48,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T20:37:48,552 INFO [regionserver/40c018648b21:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T20:37:48,552 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=209 (was 183) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43375 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43375 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43375 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43375 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43375 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43375 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43375 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:43375 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/40c018648b21:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) - Thread LEAK? -, OpenFileDescriptor=485 (was 457) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=185 (was 222), ProcessCount=11 (was 11), AvailableMemoryMB=3779 (was 3920) 2024-11-16T20:37:48,560 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=208, OpenFileDescriptor=485, MaxFileDescriptor=1048576, SystemLoadAverage=185, ProcessCount=11, AvailableMemoryMB=3779 2024-11-16T20:37:48,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T20:37:48,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/hadoop.log.dir so I do NOT create it in target/test-data/072b365f-e388-eccd-93c2-d32092a81811 2024-11-16T20:37:48,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/430f53aa-c7a1-d61a-ad84-818e915da09e/hadoop.tmp.dir so I do NOT create it in target/test-data/072b365f-e388-eccd-93c2-d32092a81811 2024-11-16T20:37:48,560 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/cluster_f3190f36-3be0-ff43-3a58-878acd87bed2, deleteOnExit=true 2024-11-16T20:37:48,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T20:37:48,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/test.cache.data in system properties and HBase conf 2024-11-16T20:37:48,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T20:37:48,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/hadoop.log.dir in system properties and HBase conf 2024-11-16T20:37:48,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T20:37:48,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T20:37:48,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T20:37:48,561 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-16T20:37:48,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T20:37:48,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T20:37:48,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T20:37:48,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T20:37:48,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T20:37:48,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T20:37:48,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T20:37:48,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T20:37:48,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T20:37:48,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/nfs.dump.dir in system properties and HBase conf 2024-11-16T20:37:48,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/java.io.tmpdir in system properties and HBase conf 2024-11-16T20:37:48,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T20:37:48,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T20:37:48,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T20:37:48,576 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T20:37:48,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:48,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:48,923 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:37:48,928 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:37:48,929 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:37:48,929 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:37:48,929 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T20:37:48,930 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:37:48,930 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45bda0cb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:37:48,930 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4881a2ed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:37:49,033 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43909889{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/java.io.tmpdir/jetty-localhost-43409-hadoop-hdfs-3_4_1-tests_jar-_-any-3551761278744837178/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T20:37:49,033 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4ac7d52f{HTTP/1.1, (http/1.1)}{localhost:43409} 2024-11-16T20:37:49,033 INFO [Time-limited test {}] server.Server(415): Started @250865ms 2024-11-16T20:37:49,045 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T20:37:49,338 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:37:49,341 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:37:49,342 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:37:49,342 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:37:49,342 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:37:49,342 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41b7d19a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:37:49,343 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5551c062{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:37:49,456 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1204fb24{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/java.io.tmpdir/jetty-localhost-39817-hadoop-hdfs-3_4_1-tests_jar-_-any-7491154503760132212/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:37:49,456 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@23e1642c{HTTP/1.1, (http/1.1)}{localhost:39817} 2024-11-16T20:37:49,456 INFO [Time-limited test {}] server.Server(415): Started @251289ms 2024-11-16T20:37:49,458 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:37:49,485 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:37:49,488 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:37:49,488 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:37:49,488 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:37:49,488 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:37:49,489 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4437c7ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:37:49,489 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a5db76d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:37:49,589 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ce0a24{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/java.io.tmpdir/jetty-localhost-39383-hadoop-hdfs-3_4_1-tests_jar-_-any-9578308817887396821/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:37:49,589 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47bcda8c{HTTP/1.1, (http/1.1)}{localhost:39383} 2024-11-16T20:37:49,589 INFO [Time-limited test {}] server.Server(415): Started @251422ms 2024-11-16T20:37:49,591 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:37:49,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:49,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:50,662 WARN [Thread-1966 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/cluster_f3190f36-3be0-ff43-3a58-878acd87bed2/data/data1/current/BP-1515412332-172.17.0.2-1731789468579/current, will proceed with Du for space computation calculation, 2024-11-16T20:37:50,663 WARN [Thread-1967 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/cluster_f3190f36-3be0-ff43-3a58-878acd87bed2/data/data2/current/BP-1515412332-172.17.0.2-1731789468579/current, will proceed with Du for space computation calculation, 2024-11-16T20:37:50,685 WARN [Thread-1930 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T20:37:50,687 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4dff11cd26680625 with lease ID 0xe0ae9742449d43f5: Processing first storage report for DS-54f86952-760f-497f-9cd2-a07aa73a199d from datanode DatanodeRegistration(127.0.0.1:40163, datanodeUuid=b5d1c7e4-d556-47ae-b8c1-1a6d504ca25a, infoPort=35917, infoSecurePort=0, ipcPort=38819, storageInfo=lv=-57;cid=testClusterID;nsid=1257309219;c=1731789468579) 2024-11-16T20:37:50,687 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4dff11cd26680625 with lease ID 0xe0ae9742449d43f5: from storage DS-54f86952-760f-497f-9cd2-a07aa73a199d node DatanodeRegistration(127.0.0.1:40163, datanodeUuid=b5d1c7e4-d556-47ae-b8c1-1a6d504ca25a, infoPort=35917, infoSecurePort=0, ipcPort=38819, storageInfo=lv=-57;cid=testClusterID;nsid=1257309219;c=1731789468579), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T20:37:50,687 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4dff11cd26680625 with lease ID 0xe0ae9742449d43f5: Processing first storage report for DS-ad088b89-20a9-4a89-b1f2-ec34961110a0 from datanode DatanodeRegistration(127.0.0.1:40163, datanodeUuid=b5d1c7e4-d556-47ae-b8c1-1a6d504ca25a, infoPort=35917, infoSecurePort=0, ipcPort=38819, storageInfo=lv=-57;cid=testClusterID;nsid=1257309219;c=1731789468579) 2024-11-16T20:37:50,687 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4dff11cd26680625 with lease ID 0xe0ae9742449d43f5: from storage DS-ad088b89-20a9-4a89-b1f2-ec34961110a0 node DatanodeRegistration(127.0.0.1:40163, datanodeUuid=b5d1c7e4-d556-47ae-b8c1-1a6d504ca25a, infoPort=35917, infoSecurePort=0, ipcPort=38819, storageInfo=lv=-57;cid=testClusterID;nsid=1257309219;c=1731789468579), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:37:50,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:50,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:50,959 WARN [Thread-1978 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/cluster_f3190f36-3be0-ff43-3a58-878acd87bed2/data/data4/current/BP-1515412332-172.17.0.2-1731789468579/current, will proceed with Du for space computation calculation, 2024-11-16T20:37:50,959 WARN [Thread-1977 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/cluster_f3190f36-3be0-ff43-3a58-878acd87bed2/data/data3/current/BP-1515412332-172.17.0.2-1731789468579/current, will proceed with Du for space computation calculation, 2024-11-16T20:37:50,978 WARN [Thread-1953 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T20:37:50,980 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15e2e427f5708bde with lease ID 0xe0ae9742449d43f6: Processing first storage report for DS-b3ec086e-597d-4459-9baa-6e3149735e2d from datanode DatanodeRegistration(127.0.0.1:34309, datanodeUuid=b502e2d5-d5b4-4b23-aa3a-d3eef8615f41, infoPort=41755, infoSecurePort=0, ipcPort=38979, storageInfo=lv=-57;cid=testClusterID;nsid=1257309219;c=1731789468579) 2024-11-16T20:37:50,980 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15e2e427f5708bde with lease ID 0xe0ae9742449d43f6: from storage DS-b3ec086e-597d-4459-9baa-6e3149735e2d node DatanodeRegistration(127.0.0.1:34309, datanodeUuid=b502e2d5-d5b4-4b23-aa3a-d3eef8615f41, infoPort=41755, infoSecurePort=0, ipcPort=38979, storageInfo=lv=-57;cid=testClusterID;nsid=1257309219;c=1731789468579), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:37:50,980 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15e2e427f5708bde with lease ID 0xe0ae9742449d43f6: Processing first storage report for DS-fc040c0a-9556-4f09-9b70-739286d214ca from datanode DatanodeRegistration(127.0.0.1:34309, datanodeUuid=b502e2d5-d5b4-4b23-aa3a-d3eef8615f41, infoPort=41755, infoSecurePort=0, ipcPort=38979, storageInfo=lv=-57;cid=testClusterID;nsid=1257309219;c=1731789468579) 2024-11-16T20:37:50,980 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15e2e427f5708bde with lease ID 0xe0ae9742449d43f6: from storage DS-fc040c0a-9556-4f09-9b70-739286d214ca node DatanodeRegistration(127.0.0.1:34309, datanodeUuid=b502e2d5-d5b4-4b23-aa3a-d3eef8615f41, infoPort=41755, infoSecurePort=0, ipcPort=38979, storageInfo=lv=-57;cid=testClusterID;nsid=1257309219;c=1731789468579), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:37:51,022 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir 
to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811 2024-11-16T20:37:51,026 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/cluster_f3190f36-3be0-ff43-3a58-878acd87bed2/zookeeper_0, clientPort=56747, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/cluster_f3190f36-3be0-ff43-3a58-878acd87bed2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/cluster_f3190f36-3be0-ff43-3a58-878acd87bed2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T20:37:51,027 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56747 2024-11-16T20:37:51,027 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:37:51,029 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:37:51,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741825_1001 (size=7) 2024-11-16T20:37:51,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741825_1001 (size=7) 2024-11-16T20:37:51,040 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985 with version=8 2024-11-16T20:37:51,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/hbase-staging 2024-11-16T20:37:51,042 INFO [Time-limited test {}] client.ConnectionUtils(128): master/40c018648b21:0 server-side Connection retries=45 2024-11-16T20:37:51,042 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:37:51,043 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T20:37:51,043 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T20:37:51,043 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:37:51,043 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T20:37:51,043 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T20:37:51,043 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T20:37:51,044 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44513 2024-11-16T20:37:51,045 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44513 connecting to ZooKeeper ensemble=127.0.0.1:56747 2024-11-16T20:37:51,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:445130x0, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T20:37:51,116 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44513-0x101455e38dc0000 connected 2024-11-16T20:37:51,198 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:37:51,200 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:37:51,201 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:37:51,201 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985, hbase.cluster.distributed=false 2024-11-16T20:37:51,203 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T20:37:51,204 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44513 2024-11-16T20:37:51,204 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44513 2024-11-16T20:37:51,204 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44513 2024-11-16T20:37:51,204 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44513 2024-11-16T20:37:51,205 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44513 2024-11-16T20:37:51,223 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/40c018648b21:0 server-side Connection retries=45 2024-11-16T20:37:51,223 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:37:51,223 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T20:37:51,223 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T20:37:51,223 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:37:51,223 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T20:37:51,223 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T20:37:51,223 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T20:37:51,224 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43411 2024-11-16T20:37:51,225 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43411 connecting to ZooKeeper ensemble=127.0.0.1:56747 2024-11-16T20:37:51,226 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:37:51,228 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:37:51,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:434110x0, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T20:37:51,240 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:434110x0, quorum=127.0.0.1:56747, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:37:51,240 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43411-0x101455e38dc0001 connected 2024-11-16T20:37:51,241 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T20:37:51,241 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T20:37:51,242 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T20:37:51,243 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T20:37:51,243 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43411 2024-11-16T20:37:51,243 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43411 2024-11-16T20:37:51,244 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43411 2024-11-16T20:37:51,244 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43411 2024-11-16T20:37:51,245 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43411 2024-11-16T20:37:51,259 DEBUG [M:0;40c018648b21:44513 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;40c018648b21:44513 2024-11-16T20:37:51,259 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/40c018648b21,44513,1731789471042 2024-11-16T20:37:51,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:37:51,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:37:51,272 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/40c018648b21,44513,1731789471042 2024-11-16T20:37:51,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:37:51,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T20:37:51,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:37:51,282 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T20:37:51,283 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/40c018648b21,44513,1731789471042 from backup master directory 2024-11-16T20:37:51,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/40c018648b21,44513,1731789471042 2024-11-16T20:37:51,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:37:51,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:37:51,293 WARN [master/40c018648b21:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T20:37:51,293 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=40c018648b21,44513,1731789471042 2024-11-16T20:37:51,296 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/hbase.id] with ID: 3bfe0e51-d09b-47d5-b951-598fd70556ac 2024-11-16T20:37:51,296 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/.tmp/hbase.id 2024-11-16T20:37:51,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741826_1002 (size=42) 2024-11-16T20:37:51,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741826_1002 (size=42) 2024-11-16T20:37:51,302 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/.tmp/hbase.id]:[hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/hbase.id] 2024-11-16T20:37:51,312 INFO [master/40c018648b21:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:37:51,312 INFO [master/40c018648b21:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T20:37:51,314 INFO [master/40c018648b21:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
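
[Editor's note, not part of the captured log] The FSUtils entries just above show the cluster ID being written to a temporary file (.tmp/hbase.id) and then moved to its final location (hbase.id). A minimal sketch of that write-then-rename pattern against the public Hadoop FileSystem API is below; the class and helper names are illustrative and this is not the HBase FSUtils implementation.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ClusterIdSketch {
  // Illustrative only: write the ID under .tmp/ first, then rename it into place,
  // so readers never observe a partially written hbase.id file.
  static void writeClusterId(FileSystem fs, Path rootDir, String clusterId) throws IOException {
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path target = new Path(rootDir, "hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, target)) {
      throw new IOException("Failed to move " + tmp + " to " + target);
    }
  }
}
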
2024-11-16T20:37:51,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:37:51,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:37:51,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741827_1003 (size=196) 2024-11-16T20:37:51,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741827_1003 (size=196) 2024-11-16T20:37:51,332 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T20:37:51,333 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T20:37:51,333 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:37:51,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741828_1004 (size=1189) 2024-11-16T20:37:51,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741828_1004 (size=1189) 2024-11-16T20:37:51,342 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store 2024-11-16T20:37:51,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741829_1005 (size=34) 2024-11-16T20:37:51,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741829_1005 (size=34) 2024-11-16T20:37:51,349 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:37:51,349 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T20:37:51,349 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:37:51,349 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:37:51,349 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T20:37:51,349 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:37:51,349 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
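
[Editor's note, not part of the captured log] The MasterRegion entries above print the full 'master:store' table descriptor (families info, proc, rs, state). For reference, a descriptor with the same logged settings for the 'info' family could be assembled with the public HBase client builders roughly as sketched below; the values are copied from the log output, but the code itself is only an illustration, not the MasterRegion implementation.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class StoreDescriptorSketch {
  static TableDescriptor masterStoreLikeDescriptor() {
    // 'info' family as logged: VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom filter,
    // IN_MEMORY=true, 8 KB block size. The proc/rs/state families follow the same pattern
    // with their own logged values.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        .build();
  }
}
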
2024-11-16T20:37:51,349 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731789471349Disabling compacts and flushes for region at 1731789471349Disabling writes for close at 1731789471349Writing region close event to WAL at 1731789471349Closed at 1731789471349 2024-11-16T20:37:51,350 WARN [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/.initializing 2024-11-16T20:37:51,350 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/WALs/40c018648b21,44513,1731789471042 2024-11-16T20:37:51,352 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C44513%2C1731789471042, suffix=, logDir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/WALs/40c018648b21,44513,1731789471042, archiveDir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/oldWALs, maxLogs=10 2024-11-16T20:37:51,353 INFO [master/40c018648b21:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C44513%2C1731789471042.1731789471352 2024-11-16T20:37:51,358 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/WALs/40c018648b21,44513,1731789471042/40c018648b21%2C44513%2C1731789471042.1731789471352 2024-11-16T20:37:51,359 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41755:41755),(127.0.0.1/127.0.0.1:35917:35917)] 2024-11-16T20:37:51,359 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T20:37:51,360 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:37:51,360 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:37:51,360 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:37:51,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:37:51,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T20:37:51,363 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:37:51,363 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:37:51,363 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:37:51,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T20:37:51,365 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:37:51,365 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:37:51,365 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:37:51,366 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T20:37:51,366 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:37:51,367 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:37:51,367 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:37:51,368 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T20:37:51,368 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:37:51,369 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:37:51,369 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:37:51,370 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:37:51,370 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:37:51,371 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:37:51,371 DEBUG [master/40c018648b21:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:37:51,372 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T20:37:51,373 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:37:51,375 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:37:51,375 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=781246, jitterRate=-0.006595596671104431}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T20:37:51,376 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731789471360Initializing all the Stores at 1731789471361 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789471361Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789471361Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789471361Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789471361Cleaning up temporary data from old regions at 1731789471371 (+10 ms)Region opened successfully at 1731789471376 (+5 ms) 2024-11-16T20:37:51,376 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T20:37:51,379 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12642627, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=40c018648b21/172.17.0.2:0 2024-11-16T20:37:51,379 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T20:37:51,380 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T20:37:51,380 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T20:37:51,380 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T20:37:51,380 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T20:37:51,381 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T20:37:51,381 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T20:37:51,383 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T20:37:51,384 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T20:37:51,408 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T20:37:51,409 INFO [master/40c018648b21:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T20:37:51,409 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T20:37:51,419 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T20:37:51,419 INFO [master/40c018648b21:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T20:37:51,420 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T20:37:51,429 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T20:37:51,430 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T20:37:51,440 DEBUG 
[master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T20:37:51,442 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T20:37:51,450 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T20:37:51,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T20:37:51,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T20:37:51,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:37:51,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:37:51,462 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=40c018648b21,44513,1731789471042, sessionid=0x101455e38dc0000, setting cluster-up flag (Was=false) 2024-11-16T20:37:51,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:37:51,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:37:51,514 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T20:37:51,515 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=40c018648b21,44513,1731789471042 2024-11-16T20:37:51,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:37:51,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:37:51,566 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T20:37:51,567 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=40c018648b21,44513,1731789471042 2024-11-16T20:37:51,569 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T20:37:51,571 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T20:37:51,571 INFO [master/40c018648b21:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T20:37:51,571 INFO [master/40c018648b21:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T20:37:51,571 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 40c018648b21,44513,1731789471042 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T20:37:51,573 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:37:51,573 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:37:51,573 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:37:51,573 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:37:51,573 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/40c018648b21:0, corePoolSize=10, maxPoolSize=10 2024-11-16T20:37:51,573 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:37:51,573 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/40c018648b21:0, corePoolSize=2, maxPoolSize=2 2024-11-16T20:37:51,573 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/40c018648b21:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T20:37:51,576 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731789501576 2024-11-16T20:37:51,576 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T20:37:51,577 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T20:37:51,577 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T20:37:51,577 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T20:37:51,577 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T20:37:51,577 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T20:37:51,577 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:37:51,578 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:51,578 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T20:37:51,578 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T20:37:51,578 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T20:37:51,578 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T20:37:51,579 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:37:51,579 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T20:37:51,584 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T20:37:51,584 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T20:37:51,588 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789471584,5,FailOnTimeoutGroup] 2024-11-16T20:37:51,588 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789471588,5,FailOnTimeoutGroup] 2024-11-16T20:37:51,588 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:51,589 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T20:37:51,589 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:51,589 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
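
[Editor's note, not part of the captured log] Several ZKUtil/ZKWatcher entries in this run report setting a watcher on a znode that does not yet exist (for example /hbase/running) and reading znodes such as /hbase/balancer that are absent, which the log notes is "not necessarily an error". In the plain ZooKeeper client API that pattern reduces to an exists() call with a watch, roughly as sketched below; the quorum string and paths are taken from the log, but the helper itself is illustrative and not HBase's ZKUtil.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public final class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath());
    // Quorum string as printed for this test run.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56747", 30000, watcher);
    // exists() registers the watch whether or not the node is present yet;
    // a null Stat simply means the znode has not been created, which is not an error.
    Stat stat = zk.exists("/hbase/running", true);
    System.out.println(stat == null
        ? "Set watcher on znode that does not yet exist, /hbase/running"
        : "Set watcher on existing znode=/hbase/running");
    zk.close();
  }
}
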
2024-11-16T20:37:51,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741831_1007 (size=1321) 2024-11-16T20:37:51,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741831_1007 (size=1321) 2024-11-16T20:37:51,594 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T20:37:51,594 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985 2024-11-16T20:37:51,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741832_1008 (size=32) 2024-11-16T20:37:51,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741832_1008 (size=32) 2024-11-16T20:37:51,646 INFO [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer(746): ClusterId : 3bfe0e51-d09b-47d5-b951-598fd70556ac 2024-11-16T20:37:51,646 DEBUG [RS:0;40c018648b21:43411 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T20:37:51,659 DEBUG [RS:0;40c018648b21:43411 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T20:37:51,659 DEBUG [RS:0;40c018648b21:43411 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T20:37:51,672 DEBUG [RS:0;40c018648b21:43411 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T20:37:51,673 DEBUG [RS:0;40c018648b21:43411 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ca9ab63, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=40c018648b21/172.17.0.2:0 2024-11-16T20:37:51,688 DEBUG [RS:0;40c018648b21:43411 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;40c018648b21:43411 2024-11-16T20:37:51,688 INFO [RS:0;40c018648b21:43411 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T20:37:51,688 INFO [RS:0;40c018648b21:43411 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T20:37:51,688 DEBUG [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T20:37:51,689 INFO [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer(2659): reportForDuty to master=40c018648b21,44513,1731789471042 with port=43411, startcode=1731789471222 2024-11-16T20:37:51,689 DEBUG [RS:0;40c018648b21:43411 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T20:37:51,691 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39891, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T20:37:51,692 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44513 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 40c018648b21,43411,1731789471222 2024-11-16T20:37:51,692 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44513 {}] master.ServerManager(517): Registering regionserver=40c018648b21,43411,1731789471222 2024-11-16T20:37:51,694 DEBUG [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985 2024-11-16T20:37:51,694 DEBUG [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43583 2024-11-16T20:37:51,694 DEBUG [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T20:37:51,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T20:37:51,703 DEBUG [RS:0;40c018648b21:43411 {}] zookeeper.ZKUtil(111): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/40c018648b21,43411,1731789471222 2024-11-16T20:37:51,704 WARN [RS:0;40c018648b21:43411 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T20:37:51,704 INFO [RS:0;40c018648b21:43411 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:37:51,704 DEBUG [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/WALs/40c018648b21,43411,1731789471222 2024-11-16T20:37:51,704 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [40c018648b21,43411,1731789471222] 2024-11-16T20:37:51,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:51,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:51,707 INFO [RS:0;40c018648b21:43411 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T20:37:51,709 INFO [RS:0;40c018648b21:43411 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T20:37:51,710 INFO [RS:0;40c018648b21:43411 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T20:37:51,710 INFO [RS:0;40c018648b21:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:51,710 INFO [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T20:37:51,711 INFO [RS:0;40c018648b21:43411 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T20:37:51,711 INFO [RS:0;40c018648b21:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-16T20:37:51,711 DEBUG [RS:0;40c018648b21:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:37:51,711 DEBUG [RS:0;40c018648b21:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:37:51,711 DEBUG [RS:0;40c018648b21:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:37:51,711 DEBUG [RS:0;40c018648b21:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:37:51,711 DEBUG [RS:0;40c018648b21:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:37:51,711 DEBUG [RS:0;40c018648b21:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/40c018648b21:0, corePoolSize=2, maxPoolSize=2 2024-11-16T20:37:51,711 DEBUG [RS:0;40c018648b21:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:37:51,711 DEBUG [RS:0;40c018648b21:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:37:51,711 DEBUG [RS:0;40c018648b21:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:37:51,711 DEBUG [RS:0;40c018648b21:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:37:51,711 DEBUG [RS:0;40c018648b21:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:37:51,712 DEBUG [RS:0;40c018648b21:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:37:51,712 DEBUG [RS:0;40c018648b21:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/40c018648b21:0, corePoolSize=3, maxPoolSize=3 2024-11-16T20:37:51,712 DEBUG [RS:0;40c018648b21:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0, corePoolSize=3, maxPoolSize=3 2024-11-16T20:37:51,712 INFO [RS:0;40c018648b21:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:51,712 INFO [RS:0;40c018648b21:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:51,712 INFO [RS:0;40c018648b21:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:51,712 INFO [RS:0;40c018648b21:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-16T20:37:51,712 INFO [RS:0;40c018648b21:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:51,712 INFO [RS:0;40c018648b21:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,43411,1731789471222-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T20:37:51,731 INFO [RS:0;40c018648b21:43411 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T20:37:51,731 INFO [RS:0;40c018648b21:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,43411,1731789471222-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:51,731 INFO [RS:0;40c018648b21:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:51,731 INFO [RS:0;40c018648b21:43411 {}] regionserver.Replication(171): 40c018648b21,43411,1731789471222 started 2024-11-16T20:37:51,750 INFO [RS:0;40c018648b21:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:51,750 INFO [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer(1482): Serving as 40c018648b21,43411,1731789471222, RpcServer on 40c018648b21/172.17.0.2:43411, sessionid=0x101455e38dc0001 2024-11-16T20:37:51,751 DEBUG [RS:0;40c018648b21:43411 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T20:37:51,751 DEBUG [RS:0;40c018648b21:43411 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 40c018648b21,43411,1731789471222 2024-11-16T20:37:51,751 DEBUG [RS:0;40c018648b21:43411 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '40c018648b21,43411,1731789471222' 2024-11-16T20:37:51,751 DEBUG [RS:0;40c018648b21:43411 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T20:37:51,751 DEBUG [RS:0;40c018648b21:43411 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T20:37:51,752 DEBUG [RS:0;40c018648b21:43411 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T20:37:51,752 DEBUG [RS:0;40c018648b21:43411 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T20:37:51,752 DEBUG [RS:0;40c018648b21:43411 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 40c018648b21,43411,1731789471222 2024-11-16T20:37:51,752 DEBUG [RS:0;40c018648b21:43411 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '40c018648b21,43411,1731789471222' 2024-11-16T20:37:51,752 DEBUG [RS:0;40c018648b21:43411 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T20:37:51,752 DEBUG [RS:0;40c018648b21:43411 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T20:37:51,752 DEBUG [RS:0;40c018648b21:43411 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T20:37:51,753 INFO [RS:0;40c018648b21:43411 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T20:37:51,753 INFO [RS:0;40c018648b21:43411 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-16T20:37:51,855 INFO [RS:0;40c018648b21:43411 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C43411%2C1731789471222, suffix=, logDir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/WALs/40c018648b21,43411,1731789471222, archiveDir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/oldWALs, maxLogs=32 2024-11-16T20:37:51,856 INFO [RS:0;40c018648b21:43411 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C43411%2C1731789471222.1731789471855 2024-11-16T20:37:51,862 INFO [RS:0;40c018648b21:43411 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/WALs/40c018648b21,43411,1731789471222/40c018648b21%2C43411%2C1731789471222.1731789471855 2024-11-16T20:37:51,863 DEBUG [RS:0;40c018648b21:43411 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35917:35917),(127.0.0.1/127.0.0.1:41755:41755)] 2024-11-16T20:37:52,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:37:52,004 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T20:37:52,005 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T20:37:52,005 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:37:52,006 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:37:52,006 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T20:37:52,007 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T20:37:52,007 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:37:52,007 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:37:52,008 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T20:37:52,009 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T20:37:52,009 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:37:52,009 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:37:52,009 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T20:37:52,010 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T20:37:52,010 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:37:52,011 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:37:52,011 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T20:37:52,011 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740 2024-11-16T20:37:52,012 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740 2024-11-16T20:37:52,013 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T20:37:52,013 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T20:37:52,013 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T20:37:52,014 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T20:37:52,016 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:37:52,017 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=833785, jitterRate=0.06021283566951752}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T20:37:52,017 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731789472003Initializing all the Stores at 1731789472004 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789472004Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789472004Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789472004Instantiating store for column family {NAME => 
'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789472004Cleaning up temporary data from old regions at 1731789472013 (+9 ms)Region opened successfully at 1731789472017 (+4 ms) 2024-11-16T20:37:52,018 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T20:37:52,018 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T20:37:52,018 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T20:37:52,018 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T20:37:52,018 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T20:37:52,018 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T20:37:52,018 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731789472017Disabling compacts and flushes for region at 1731789472017Disabling writes for close at 1731789472018 (+1 ms)Writing region close event to WAL at 1731789472018Closed at 1731789472018 2024-11-16T20:37:52,019 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:37:52,019 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T20:37:52,019 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T20:37:52,020 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T20:37:52,022 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T20:37:52,172 DEBUG [40c018648b21:44513 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T20:37:52,173 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=40c018648b21,43411,1731789471222 2024-11-16T20:37:52,174 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 40c018648b21,43411,1731789471222, state=OPENING 2024-11-16T20:37:52,269 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T20:37:52,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-16T20:37:52,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:37:52,290 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:37:52,290 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:37:52,290 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T20:37:52,290 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=40c018648b21,43411,1731789471222}] 2024-11-16T20:37:52,444 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T20:37:52,446 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48559, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T20:37:52,449 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T20:37:52,449 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:37:52,451 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C43411%2C1731789471222.meta, suffix=.meta, logDir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/WALs/40c018648b21,43411,1731789471222, archiveDir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/oldWALs, maxLogs=32 2024-11-16T20:37:52,452 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C43411%2C1731789471222.meta.1731789472452.meta 2024-11-16T20:37:52,457 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/WALs/40c018648b21,43411,1731789471222/40c018648b21%2C43411%2C1731789471222.meta.1731789472452.meta 2024-11-16T20:37:52,458 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41755:41755),(127.0.0.1/127.0.0.1:35917:35917)] 2024-11-16T20:37:52,459 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T20:37:52,459 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T20:37:52,459 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T20:37:52,459 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-16T20:37:52,459 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T20:37:52,459 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:37:52,459 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T20:37:52,459 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T20:37:52,463 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T20:37:52,464 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T20:37:52,464 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:37:52,465 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:37:52,465 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T20:37:52,465 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T20:37:52,465 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:37:52,466 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:37:52,466 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T20:37:52,466 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T20:37:52,466 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:37:52,467 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:37:52,467 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T20:37:52,467 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for 
minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T20:37:52,467 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:37:52,468 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:37:52,468 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T20:37:52,468 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740 2024-11-16T20:37:52,469 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740 2024-11-16T20:37:52,471 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T20:37:52,471 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T20:37:52,471 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-16T20:37:52,473 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T20:37:52,473 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=828691, jitterRate=0.053736135363578796}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T20:37:52,473 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T20:37:52,474 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731789472459Writing region info on filesystem at 1731789472459Initializing all the Stores at 1731789472460 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789472460Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789472463 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789472463Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789472463Cleaning up temporary data from old regions at 1731789472471 (+8 ms)Running coprocessor post-open hooks at 1731789472473 (+2 ms)Region opened successfully at 1731789472474 (+1 ms) 2024-11-16T20:37:52,475 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731789472444 2024-11-16T20:37:52,477 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T20:37:52,477 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T20:37:52,478 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=40c018648b21,43411,1731789471222 2024-11-16T20:37:52,479 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 40c018648b21,43411,1731789471222, state=OPEN 2024-11-16T20:37:52,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T20:37:52,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T20:37:52,516 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=40c018648b21,43411,1731789471222 2024-11-16T20:37:52,516 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:37:52,516 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:37:52,519 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T20:37:52,519 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=40c018648b21,43411,1731789471222 in 226 msec 2024-11-16T20:37:52,522 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T20:37:52,522 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 500 msec 2024-11-16T20:37:52,523 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:37:52,523 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T20:37:52,525 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T20:37:52,525 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=40c018648b21,43411,1731789471222, seqNum=-1] 2024-11-16T20:37:52,525 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T20:37:52,526 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39727, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T20:37:52,532 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 960 msec 2024-11-16T20:37:52,532 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731789472532, completionTime=-1 2024-11-16T20:37:52,532 INFO 
[master/40c018648b21:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T20:37:52,532 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T20:37:52,534 INFO [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T20:37:52,534 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731789532534 2024-11-16T20:37:52,534 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731789592534 2024-11-16T20:37:52,534 INFO [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-16T20:37:52,534 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,44513,1731789471042-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:52,534 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,44513,1731789471042-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:52,535 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,44513,1731789471042-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:52,535 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-40c018648b21:44513, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:52,535 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:52,535 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:52,537 DEBUG [master/40c018648b21:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T20:37:52,539 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.246sec 2024-11-16T20:37:52,539 INFO [master/40c018648b21:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T20:37:52,539 INFO [master/40c018648b21:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T20:37:52,539 INFO [master/40c018648b21:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T20:37:52,539 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-16T20:37:52,539 INFO [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T20:37:52,539 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,44513,1731789471042-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T20:37:52,539 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,44513,1731789471042-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T20:37:52,542 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T20:37:52,542 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T20:37:52,542 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,44513,1731789471042-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:37:52,547 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@651a5cab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:37:52,547 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 40c018648b21,44513,-1 for getting cluster id 2024-11-16T20:37:52,547 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T20:37:52,549 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3bfe0e51-d09b-47d5-b951-598fd70556ac' 2024-11-16T20:37:52,549 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T20:37:52,549 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3bfe0e51-d09b-47d5-b951-598fd70556ac" 2024-11-16T20:37:52,549 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b5783b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:37:52,549 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [40c018648b21,44513,-1] 2024-11-16T20:37:52,550 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T20:37:52,550 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:37:52,551 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48224, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T20:37:52,552 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22a53521, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:37:52,552 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T20:37:52,553 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=40c018648b21,43411,1731789471222, seqNum=-1] 2024-11-16T20:37:52,554 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T20:37:52,555 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38334, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T20:37:52,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=40c018648b21,44513,1731789471042 2024-11-16T20:37:52,557 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:37:52,559 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T20:37:52,560 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T20:37:52,561 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 40c018648b21,44513,1731789471042 2024-11-16T20:37:52,561 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3152bb03 2024-11-16T20:37:52,561 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T20:37:52,562 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48240, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T20:37:52,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44513 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T20:37:52,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44513 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-16T20:37:52,563 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44513 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T20:37:52,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44513 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-16T20:37:52,566 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T20:37:52,566 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:37:52,566 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44513 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-16T20:37:52,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T20:37:52,568 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T20:37:52,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741835_1011 (size=381) 2024-11-16T20:37:52,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741835_1011 (size=381) 2024-11-16T20:37:52,576 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ec472999ca7ae234bff5f83872160c17, NAME => 'TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985 2024-11-16T20:37:52,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741836_1012 (size=64) 2024-11-16T20:37:52,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741836_1012 (size=64) 2024-11-16T20:37:52,590 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:37:52,591 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing ec472999ca7ae234bff5f83872160c17, disabling compactions & flushes 2024-11-16T20:37:52,591 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. 2024-11-16T20:37:52,591 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. 2024-11-16T20:37:52,591 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. after waiting 0 ms 2024-11-16T20:37:52,591 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. 2024-11-16T20:37:52,591 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. 2024-11-16T20:37:52,591 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for ec472999ca7ae234bff5f83872160c17: Waiting for close lock at 1731789472591Disabling compacts and flushes for region at 1731789472591Disabling writes for close at 1731789472591Writing region close event to WAL at 1731789472591Closed at 1731789472591 2024-11-16T20:37:52,592 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T20:37:52,593 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731789472592"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731789472592"}]},"ts":"1731789472592"} 2024-11-16T20:37:52,595 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-16T20:37:52,596 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T20:37:52,597 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731789472596"}]},"ts":"1731789472596"} 2024-11-16T20:37:52,599 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-16T20:37:52,599 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ec472999ca7ae234bff5f83872160c17, ASSIGN}] 2024-11-16T20:37:52,600 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ec472999ca7ae234bff5f83872160c17, ASSIGN 2024-11-16T20:37:52,601 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ec472999ca7ae234bff5f83872160c17, ASSIGN; state=OFFLINE, location=40c018648b21,43411,1731789471222; forceNewPlan=false, retain=false 2024-11-16T20:37:52,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:52,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:52,752 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ec472999ca7ae234bff5f83872160c17, regionState=OPENING, regionLocation=40c018648b21,43411,1731789471222 2024-11-16T20:37:52,754 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ec472999ca7ae234bff5f83872160c17, ASSIGN because future has completed 2024-11-16T20:37:52,755 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ec472999ca7ae234bff5f83872160c17, server=40c018648b21,43411,1731789471222}] 2024-11-16T20:37:52,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,887 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,888 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,911 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. 2024-11-16T20:37:52,911 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ec472999ca7ae234bff5f83872160c17, NAME => 'TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17.', STARTKEY => '', ENDKEY => ''} 2024-11-16T20:37:52,911 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling ec472999ca7ae234bff5f83872160c17 2024-11-16T20:37:52,911 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:37:52,911 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ec472999ca7ae234bff5f83872160c17 2024-11-16T20:37:52,911 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ec472999ca7ae234bff5f83872160c17 2024-11-16T20:37:52,912 INFO [StoreOpener-ec472999ca7ae234bff5f83872160c17-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ec472999ca7ae234bff5f83872160c17 2024-11-16T20:37:52,914 INFO [StoreOpener-ec472999ca7ae234bff5f83872160c17-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ec472999ca7ae234bff5f83872160c17 columnFamilyName info 2024-11-16T20:37:52,914 DEBUG [StoreOpener-ec472999ca7ae234bff5f83872160c17-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:37:52,914 INFO [StoreOpener-ec472999ca7ae234bff5f83872160c17-1 {}] regionserver.HStore(327): Store=ec472999ca7ae234bff5f83872160c17/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, 
compression=NONE 2024-11-16T20:37:52,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,914 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ec472999ca7ae234bff5f83872160c17 2024-11-16T20:37:52,915 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,915 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,915 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17 2024-11-16T20:37:52,915 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17 2024-11-16T20:37:52,915 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ec472999ca7ae234bff5f83872160c17 2024-11-16T20:37:52,916 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ec472999ca7ae234bff5f83872160c17 2024-11-16T20:37:52,917 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ec472999ca7ae234bff5f83872160c17 2024-11-16T20:37:52,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:52,919 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:37:52,919 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ec472999ca7ae234bff5f83872160c17; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=760190, jitterRate=-0.0333690345287323}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T20:37:52,919 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ec472999ca7ae234bff5f83872160c17 2024-11-16T20:37:52,920 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ec472999ca7ae234bff5f83872160c17: Running coprocessor pre-open hook at 1731789472911Writing region info on filesystem at 1731789472911Initializing all the Stores at 1731789472912 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789472912Cleaning up temporary data from old regions at 1731789472916 (+4 ms)Running coprocessor post-open hooks at 1731789472919 (+3 ms)Region opened successfully at 1731789472920 (+1 ms) 2024-11-16T20:37:52,921 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17., pid=6, masterSystemTime=1731789472907 2024-11-16T20:37:52,923 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. 2024-11-16T20:37:52,923 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. 
2024-11-16T20:37:52,924 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ec472999ca7ae234bff5f83872160c17, regionState=OPEN, openSeqNum=2, regionLocation=40c018648b21,43411,1731789471222 2024-11-16T20:37:52,926 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ec472999ca7ae234bff5f83872160c17, server=40c018648b21,43411,1731789471222 because future has completed 2024-11-16T20:37:52,929 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T20:37:52,930 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ec472999ca7ae234bff5f83872160c17, server=40c018648b21,43411,1731789471222 in 172 msec 2024-11-16T20:37:52,932 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T20:37:52,932 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ec472999ca7ae234bff5f83872160c17, ASSIGN in 330 msec 2024-11-16T20:37:52,933 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T20:37:52,933 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731789472933"}]},"ts":"1731789472933"} 2024-11-16T20:37:52,935 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-16T20:37:52,936 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T20:37:52,939 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 373 msec 2024-11-16T20:37:53,423 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T20:37:53,424 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,424 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,424 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,424 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,424 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,425 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,425 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,425 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,464 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:53,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:53,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:54,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:54,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:55,143 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-16T20:37:55,143 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-16T20:37:55,145 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T20:37:55,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:55,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:56,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:56,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:57,708 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T20:37:57,710 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-16T20:37:57,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:57,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:58,415 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T20:37:58,417 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:58,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:58,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:58,419 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:58,419 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:58,420 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:58,421 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:58,422 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:58,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:58,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:58,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:58,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:58,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:58,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:58,450 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:58,450 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:58,451 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T20:37:58,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:37:58,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:58,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:37:59,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:37:59,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:00,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:00,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:38:01,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:01,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-16T20:38:02,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-16T20:38:02,584 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-16T20:38:02,584 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-11-16T20:38:02,587 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-16T20:38:02,587 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17.
2024-11-16T20:38:02,590 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17., hostname=40c018648b21,43411,1731789471222, seqNum=2]
2024-11-16T20:38:02,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on ec472999ca7ae234bff5f83872160c17
2024-11-16T20:38:02,603 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ec472999ca7ae234bff5f83872160c17 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-16T20:38:02,623 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/94447b8cd1d8444d80a9ea007f244f23 is 1080, key is row0001/info:/1731789482591/Put/seqid=0
2024-11-16T20:38:02,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741837_1013 (size=12509)
2024-11-16T20:38:02,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741837_1013 (size=12509)
2024-11-16T20:38:02,647 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/94447b8cd1d8444d80a9ea007f244f23
2024-11-16T20:38:02,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/94447b8cd1d8444d80a9ea007f244f23 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/94447b8cd1d8444d80a9ea007f244f23
2024-11-16T20:38:02,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=ec472999ca7ae234bff5f83872160c17, server=40c018648b21,43411,1731789471222
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-16T20:38:02,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:38334 deadline: 1731789492654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=ec472999ca7ae234bff5f83872160c17, server=40c018648b21,43411,1731789471222
2024-11-16T20:38:02,669 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/94447b8cd1d8444d80a9ea007f244f23, entries=7, sequenceid=11, filesize=12.2 K
2024-11-16T20:38:02,670 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for ec472999ca7ae234bff5f83872160c17 in 66ms, sequenceid=11, compaction requested=false
2024-11-16T20:38:02,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ec472999ca7ae234bff5f83872160c17:
2024-11-16T20:38:02,685 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17., hostname=40c018648b21,43411,1731789471222, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17., hostname=40c018648b21,43411,1731789471222, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=ec472999ca7ae234bff5f83872160c17, server=40c018648b21,43411,1731789471222
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-16T20:38:02,686 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17., hostname=40c018648b21,43411,1731789471222, seqNum=2 is
org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=ec472999ca7ae234bff5f83872160c17, server=40c018648b21,43411,1731789471222
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-16T20:38:02,686 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17., hostname=40c018648b21,43411,1731789471222, seqNum=2 because the exception is null or not the one we care about
2024-11-16T20:38:02,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:02,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:03,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:03,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:04,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:04,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:05,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:05,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:06,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:06,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:07,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:07,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:08,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:08,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:09,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:09,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:10,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:10,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:11,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:11,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:12,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:12,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:12,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:12,745 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ec472999ca7ae234bff5f83872160c17 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-16T20:38:12,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/d14a4f61c9084e22b08248f466b31817 is 1080, key is row0008/info:/1731789482604/Put/seqid=0 2024-11-16T20:38:12,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741838_1014 (size=29761) 2024-11-16T20:38:12,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741838_1014 (size=29761) 2024-11-16T20:38:12,757 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/d14a4f61c9084e22b08248f466b31817 2024-11-16T20:38:12,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/d14a4f61c9084e22b08248f466b31817 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/d14a4f61c9084e22b08248f466b31817 2024-11-16T20:38:12,771 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/d14a4f61c9084e22b08248f466b31817, entries=23, sequenceid=37, filesize=29.1 K 2024-11-16T20:38:12,772 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for ec472999ca7ae234bff5f83872160c17 in 27ms, sequenceid=37, compaction requested=false 2024-11-16T20:38:12,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ec472999ca7ae234bff5f83872160c17: 2024-11-16T20:38:12,772 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-16T20:38:12,772 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:38:12,772 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/d14a4f61c9084e22b08248f466b31817 because midkey is the same as first or last row 2024-11-16T20:38:13,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:13,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:14,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:14,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:38:14,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:14,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ec472999ca7ae234bff5f83872160c17 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T20:38:14,764 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/8dfd922f23884969945ca4595b0cc7c0 is 1080, key is row0031/info:/1731789492747/Put/seqid=0 2024-11-16T20:38:14,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741839_1015 (size=12509) 2024-11-16T20:38:14,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741839_1015 (size=12509) 2024-11-16T20:38:14,771 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/8dfd922f23884969945ca4595b0cc7c0 2024-11-16T20:38:14,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/8dfd922f23884969945ca4595b0cc7c0 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/8dfd922f23884969945ca4595b0cc7c0 2024-11-16T20:38:14,785 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/8dfd922f23884969945ca4595b0cc7c0, entries=7, sequenceid=47, filesize=12.2 K 2024-11-16T20:38:14,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for ec472999ca7ae234bff5f83872160c17 in 25ms, sequenceid=47, compaction requested=true 2024-11-16T20:38:14,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ec472999ca7ae234bff5f83872160c17: 2024-11-16T20:38:14,786 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-16T20:38:14,786 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:38:14,786 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/d14a4f61c9084e22b08248f466b31817 because midkey is the same as first or last row 2024-11-16T20:38:14,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec472999ca7ae234bff5f83872160c17:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-16T20:38:14,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:14,786 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T20:38:14,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:14,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ec472999ca7ae234bff5f83872160c17 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-16T20:38:14,788 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T20:38:14,788 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1541): ec472999ca7ae234bff5f83872160c17/info is initiating minor compaction (all files) 2024-11-16T20:38:14,788 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ec472999ca7ae234bff5f83872160c17/info in TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. 2024-11-16T20:38:14,788 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/94447b8cd1d8444d80a9ea007f244f23, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/d14a4f61c9084e22b08248f466b31817, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/8dfd922f23884969945ca4595b0cc7c0] into tmpdir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp, totalSize=53.5 K 2024-11-16T20:38:14,788 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting 94447b8cd1d8444d80a9ea007f244f23, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731789482591 2024-11-16T20:38:14,789 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting d14a4f61c9084e22b08248f466b31817, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731789482604 2024-11-16T20:38:14,789 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8dfd922f23884969945ca4595b0cc7c0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731789492747 2024-11-16T20:38:14,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/a5a7c8d9b49947009e13e5be7efd538b is 1080, key is row0038/info:/1731789494761/Put/seqid=0 
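Editor's note on the flush/split entries above: after each memstore flush the region server sums the store file sizes (the "sumSize" figure) against the split threshold ("sizeToCheck"), and even when that check passes it declines to split when the midkey of the candidate store file equals its first or last row, which is exactly the "Should split because region size is big enough" / "cannot split ... because midkey is the same as first or last row" pairing logged above. The following is a minimal, self-contained Java sketch of that decision sequence only; the class, record, and method names are illustrative and the last-row values are assumed, so this is not the HBase implementation of ConstantSizeRegionSplitPolicy or StoreUtils.

// SplitCheckSketch.java -- illustrative sketch of the size-then-midkey split
// decision visible in the log above; not the actual HBase code.
import java.util.Comparator;
import java.util.List;

public class SplitCheckSketch {

    // A store file reduced to the fields the split check needs (hypothetical type).
    record StoreFile(String name, long sizeBytes, String firstRow, String midKey, String lastRow) {}

    // Returns true only if the summed store size exceeds the threshold AND the
    // largest file's midkey differs from both its first and last row; otherwise
    // the region is either too small or a split would leave an empty daughter.
    static boolean shouldSplit(List<StoreFile> files, long sizeToCheckBytes) {
        long sumSize = files.stream().mapToLong(StoreFile::sizeBytes).sum();
        if (sumSize <= sizeToCheckBytes) {
            return false; // region not big enough yet
        }
        // The largest file's midkey becomes the candidate split point.
        StoreFile largest = files.stream()
            .max(Comparator.comparingLong(StoreFile::sizeBytes))
            .orElseThrow();
        if (largest.midKey().equals(largest.firstRow())
                || largest.midKey().equals(largest.lastRow())) {
            return false; // "midkey is the same as first or last row"
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes and start rows echo the log (12509 + 29761 + 12509 bytes vs a 16 K check);
        // midkey and last-row values are assumed for illustration.
        List<StoreFile> files = List.of(
            new StoreFile("94447b8cd1d8444d80a9ea007f244f23", 12_509, "row0001", "row0001", "row0007"),
            new StoreFile("d14a4f61c9084e22b08248f466b31817", 29_761, "row0008", "row0008", "row0030"),
            new StoreFile("8dfd922f23884969945ca4595b0cc7c0", 12_509, "row0031", "row0031", "row0037"));
        System.out.println("shouldSplit = " + shouldSplit(files, 16 * 1024)); // prints false, as in the log
    }
}

Under these assumed inputs the sketch reproduces the logged outcome: the 53.5 K total clears the 16 K check, but the largest file's midkey coincides with its first row, so no split is requested and the flusher instead queues the minor compaction seen in the next entries.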
2024-11-16T20:38:14,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741840_1016 (size=18987) 2024-11-16T20:38:14,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741840_1016 (size=18987) 2024-11-16T20:38:14,810 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec472999ca7ae234bff5f83872160c17#info#compaction#59 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T20:38:14,811 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/b799d94102ce452b907c99725082d958 is 1080, key is row0001/info:/1731789482591/Put/seqid=0 2024-11-16T20:38:14,812 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=63 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/a5a7c8d9b49947009e13e5be7efd538b 2024-11-16T20:38:14,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/a5a7c8d9b49947009e13e5be7efd538b as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/a5a7c8d9b49947009e13e5be7efd538b 2024-11-16T20:38:14,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741841_1017 (size=44978) 2024-11-16T20:38:14,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741841_1017 (size=44978) 2024-11-16T20:38:14,823 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/a5a7c8d9b49947009e13e5be7efd538b, entries=13, sequenceid=63, filesize=18.5 K 2024-11-16T20:38:14,824 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=14.71 KB/15064 for ec472999ca7ae234bff5f83872160c17 in 37ms, sequenceid=63, compaction requested=false 2024-11-16T20:38:14,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ec472999ca7ae234bff5f83872160c17: 2024-11-16T20:38:14,824 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.0 K, sizeToCheck=16.0 K 2024-11-16T20:38:14,824 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:38:14,824 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/d14a4f61c9084e22b08248f466b31817 because midkey is the same as first or last row 2024-11-16T20:38:14,826 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/b799d94102ce452b907c99725082d958 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/b799d94102ce452b907c99725082d958 2024-11-16T20:38:14,831 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ec472999ca7ae234bff5f83872160c17/info of ec472999ca7ae234bff5f83872160c17 into b799d94102ce452b907c99725082d958(size=43.9 K), total size for store is 62.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T20:38:14,831 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ec472999ca7ae234bff5f83872160c17: 2024-11-16T20:38:14,831 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17., storeName=ec472999ca7ae234bff5f83872160c17/info, priority=13, startTime=1731789494786; duration=0sec 2024-11-16T20:38:14,831 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=62.5 K, sizeToCheck=16.0 K 2024-11-16T20:38:14,831 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:38:14,831 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/b799d94102ce452b907c99725082d958 because midkey is the same as first or last row 2024-11-16T20:38:14,831 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=62.5 K, sizeToCheck=16.0 K 2024-11-16T20:38:14,831 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:38:14,831 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/b799d94102ce452b907c99725082d958 because midkey is the same as first or last row 2024-11-16T20:38:14,832 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=62.5 K, sizeToCheck=16.0 K 2024-11-16T20:38:14,832 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:38:14,832 DEBUG 
[RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/b799d94102ce452b907c99725082d958 because midkey is the same as first or last row 2024-11-16T20:38:14,832 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:14,832 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec472999ca7ae234bff5f83872160c17:info 2024-11-16T20:38:15,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:15,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:16,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:16,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:38:16,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:16,817 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ec472999ca7ae234bff5f83872160c17 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-16T20:38:16,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/1ddc402346004abfa27ddfa119b87aec is 1080, key is row0051/info:/1731789494788/Put/seqid=0 2024-11-16T20:38:16,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741842_1018 (size=21141) 2024-11-16T20:38:16,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741842_1018 (size=21141) 2024-11-16T20:38:16,830 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/1ddc402346004abfa27ddfa119b87aec 2024-11-16T20:38:16,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/1ddc402346004abfa27ddfa119b87aec as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/1ddc402346004abfa27ddfa119b87aec 2024-11-16T20:38:16,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/1ddc402346004abfa27ddfa119b87aec, entries=15, sequenceid=82, filesize=20.6 K 2024-11-16T20:38:16,848 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for ec472999ca7ae234bff5f83872160c17 in 30ms, sequenceid=82, compaction requested=true 2024-11-16T20:38:16,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ec472999ca7ae234bff5f83872160c17: 2024-11-16T20:38:16,848 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-16T20:38:16,848 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:38:16,848 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/b799d94102ce452b907c99725082d958 because midkey is the same as first or last row 2024-11-16T20:38:16,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec472999ca7ae234bff5f83872160c17:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-16T20:38:16,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:16,848 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T20:38:16,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:16,850 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T20:38:16,850 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ec472999ca7ae234bff5f83872160c17 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T20:38:16,850 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1541): ec472999ca7ae234bff5f83872160c17/info is initiating minor compaction (all files) 2024-11-16T20:38:16,850 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ec472999ca7ae234bff5f83872160c17/info in TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. 2024-11-16T20:38:16,850 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/b799d94102ce452b907c99725082d958, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/a5a7c8d9b49947009e13e5be7efd538b, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/1ddc402346004abfa27ddfa119b87aec] into tmpdir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp, totalSize=83.1 K 2024-11-16T20:38:16,851 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting b799d94102ce452b907c99725082d958, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731789482591 2024-11-16T20:38:16,851 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting a5a7c8d9b49947009e13e5be7efd538b, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=63, earliestPutTs=1731789494761 2024-11-16T20:38:16,852 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1ddc402346004abfa27ddfa119b87aec, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731789494788 2024-11-16T20:38:16,856 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/f07bc69420ab4bf49fec50aaa07fddc5 is 1080, key is 
row0066/info:/1731789496819/Put/seqid=0 2024-11-16T20:38:16,868 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec472999ca7ae234bff5f83872160c17#info#compaction#62 average throughput is 33.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T20:38:16,869 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/adf6f2f98ccf48c4beb848e39355383d is 1080, key is row0001/info:/1731789482591/Put/seqid=0 2024-11-16T20:38:16,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741843_1019 (size=17894) 2024-11-16T20:38:16,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741843_1019 (size=17894) 2024-11-16T20:38:16,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/f07bc69420ab4bf49fec50aaa07fddc5 2024-11-16T20:38:16,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/f07bc69420ab4bf49fec50aaa07fddc5 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/f07bc69420ab4bf49fec50aaa07fddc5 2024-11-16T20:38:16,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/f07bc69420ab4bf49fec50aaa07fddc5, entries=12, sequenceid=97, filesize=17.5 K 2024-11-16T20:38:16,889 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=12.61 KB/12912 for ec472999ca7ae234bff5f83872160c17 in 39ms, sequenceid=97, compaction requested=false 2024-11-16T20:38:16,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ec472999ca7ae234bff5f83872160c17: 2024-11-16T20:38:16,889 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=100.6 K, sizeToCheck=16.0 K 2024-11-16T20:38:16,889 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:38:16,889 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/b799d94102ce452b907c99725082d958 because midkey is the same as first or last row 2024-11-16T20:38:16,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on 
ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:16,889 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ec472999ca7ae234bff5f83872160c17 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-16T20:38:16,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741844_1020 (size=75378) 2024-11-16T20:38:16,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741844_1020 (size=75378) 2024-11-16T20:38:16,896 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/f40e67c765794f5c9aac797adf80964b is 1080, key is row0078/info:/1731789496851/Put/seqid=0 2024-11-16T20:38:16,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741845_1021 (size=18987) 2024-11-16T20:38:16,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741845_1021 (size=18987) 2024-11-16T20:38:16,908 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/f40e67c765794f5c9aac797adf80964b 2024-11-16T20:38:16,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/f40e67c765794f5c9aac797adf80964b as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/f40e67c765794f5c9aac797adf80964b 2024-11-16T20:38:16,922 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/f40e67c765794f5c9aac797adf80964b, entries=13, sequenceid=113, filesize=18.5 K 2024-11-16T20:38:16,923 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=6.30 KB/6456 for ec472999ca7ae234bff5f83872160c17 in 34ms, sequenceid=113, compaction requested=false 2024-11-16T20:38:16,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ec472999ca7ae234bff5f83872160c17: 2024-11-16T20:38:16,923 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=119.1 K, sizeToCheck=16.0 K 2024-11-16T20:38:16,923 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:38:16,923 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/b799d94102ce452b907c99725082d958 because midkey is the same as 
first or last row 2024-11-16T20:38:17,302 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/adf6f2f98ccf48c4beb848e39355383d as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/adf6f2f98ccf48c4beb848e39355383d 2024-11-16T20:38:17,308 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ec472999ca7ae234bff5f83872160c17/info of ec472999ca7ae234bff5f83872160c17 into adf6f2f98ccf48c4beb848e39355383d(size=73.6 K), total size for store is 109.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T20:38:17,308 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ec472999ca7ae234bff5f83872160c17: 2024-11-16T20:38:17,308 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17., storeName=ec472999ca7ae234bff5f83872160c17/info, priority=13, startTime=1731789496848; duration=0sec 2024-11-16T20:38:17,308 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=109.6 K, sizeToCheck=16.0 K 2024-11-16T20:38:17,308 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:38:17,308 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=109.6 K, sizeToCheck=16.0 K 2024-11-16T20:38:17,308 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:38:17,308 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=109.6 K, sizeToCheck=16.0 K 2024-11-16T20:38:17,308 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T20:38:17,309 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:17,309 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:17,309 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec472999ca7ae234bff5f83872160c17:info 2024-11-16T20:38:17,310 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44513 {}] assignment.AssignmentManager(1355): Split request from 40c018648b21,43411,1731789471222, parent={ENCODED => ec472999ca7ae234bff5f83872160c17, NAME => 'TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17.', STARTKEY => 
'', ENDKEY => ''}, splitKey=row0062 2024-11-16T20:38:17,315 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44513 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=40c018648b21,43411,1731789471222 2024-11-16T20:38:17,319 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44513 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ec472999ca7ae234bff5f83872160c17, daughterA=f1a3a111b221e74628f64224b1aaf85e, daughterB=0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:17,320 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ec472999ca7ae234bff5f83872160c17, daughterA=f1a3a111b221e74628f64224b1aaf85e, daughterB=0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:17,321 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ec472999ca7ae234bff5f83872160c17, daughterA=f1a3a111b221e74628f64224b1aaf85e, daughterB=0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:17,321 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ec472999ca7ae234bff5f83872160c17, daughterA=f1a3a111b221e74628f64224b1aaf85e, daughterB=0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:17,327 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ec472999ca7ae234bff5f83872160c17, UNASSIGN}] 2024-11-16T20:38:17,328 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ec472999ca7ae234bff5f83872160c17, UNASSIGN 2024-11-16T20:38:17,330 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=ec472999ca7ae234bff5f83872160c17, regionState=CLOSING, regionLocation=40c018648b21,43411,1731789471222 2024-11-16T20:38:17,332 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ec472999ca7ae234bff5f83872160c17, UNASSIGN because future has completed 2024-11-16T20:38:17,333 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-16T20:38:17,333 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure ec472999ca7ae234bff5f83872160c17, server=40c018648b21,43411,1731789471222}] 2024-11-16T20:38:17,489 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:17,489 DEBUG 
[RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-16T20:38:17,490 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing ec472999ca7ae234bff5f83872160c17, disabling compactions & flushes 2024-11-16T20:38:17,490 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. 2024-11-16T20:38:17,490 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. 2024-11-16T20:38:17,490 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. after waiting 0 ms 2024-11-16T20:38:17,490 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. 2024-11-16T20:38:17,490 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing ec472999ca7ae234bff5f83872160c17 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-11-16T20:38:17,494 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/61f4ea80bf044926a61a65d26772c65d is 1080, key is row0091/info:/1731789496890/Put/seqid=0 2024-11-16T20:38:17,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741846_1022 (size=11424) 2024-11-16T20:38:17,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741846_1022 (size=11424) 2024-11-16T20:38:17,501 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/61f4ea80bf044926a61a65d26772c65d 2024-11-16T20:38:17,506 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/.tmp/info/61f4ea80bf044926a61a65d26772c65d as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/61f4ea80bf044926a61a65d26772c65d 2024-11-16T20:38:17,512 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/61f4ea80bf044926a61a65d26772c65d, entries=6, sequenceid=123, filesize=11.2 K 2024-11-16T20:38:17,513 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6456, heapSize ~6.98 KB/7152, currentSize=0 B/0 for ec472999ca7ae234bff5f83872160c17 in 23ms, sequenceid=123, compaction requested=true 2024-11-16T20:38:17,514 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/94447b8cd1d8444d80a9ea007f244f23, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/d14a4f61c9084e22b08248f466b31817, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/b799d94102ce452b907c99725082d958, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/8dfd922f23884969945ca4595b0cc7c0, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/a5a7c8d9b49947009e13e5be7efd538b, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/1ddc402346004abfa27ddfa119b87aec] to archive 2024-11-16T20:38:17,515 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-16T20:38:17,517 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/94447b8cd1d8444d80a9ea007f244f23 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/94447b8cd1d8444d80a9ea007f244f23 2024-11-16T20:38:17,518 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/d14a4f61c9084e22b08248f466b31817 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/d14a4f61c9084e22b08248f466b31817 2024-11-16T20:38:17,519 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/b799d94102ce452b907c99725082d958 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/b799d94102ce452b907c99725082d958 2024-11-16T20:38:17,521 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/8dfd922f23884969945ca4595b0cc7c0 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/8dfd922f23884969945ca4595b0cc7c0 2024-11-16T20:38:17,522 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/a5a7c8d9b49947009e13e5be7efd538b to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/a5a7c8d9b49947009e13e5be7efd538b 2024-11-16T20:38:17,523 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/1ddc402346004abfa27ddfa119b87aec to 
hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/1ddc402346004abfa27ddfa119b87aec 2024-11-16T20:38:17,530 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-16T20:38:17,531 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. 2024-11-16T20:38:17,531 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for ec472999ca7ae234bff5f83872160c17: Waiting for close lock at 1731789497490Running coprocessor pre-close hooks at 1731789497490Disabling compacts and flushes for region at 1731789497490Disabling writes for close at 1731789497490Obtaining lock to block concurrent updates at 1731789497490Preparing flush snapshotting stores in ec472999ca7ae234bff5f83872160c17 at 1731789497490Finished memstore snapshotting TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17., syncing WAL and waiting on mvcc, flushsize=dataSize=6456, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1731789497490Flushing stores of TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. at 1731789497491 (+1 ms)Flushing ec472999ca7ae234bff5f83872160c17/info: creating writer at 1731789497491Flushing ec472999ca7ae234bff5f83872160c17/info: appending metadata at 1731789497493 (+2 ms)Flushing ec472999ca7ae234bff5f83872160c17/info: closing flushed file at 1731789497493Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e344728: reopening flushed file at 1731789497505 (+12 ms)Finished flush of dataSize ~6.30 KB/6456, heapSize ~6.98 KB/7152, currentSize=0 B/0 for ec472999ca7ae234bff5f83872160c17 in 23ms, sequenceid=123, compaction requested=true at 1731789497513 (+8 ms)Writing region close event to WAL at 1731789497526 (+13 ms)Running coprocessor post-close hooks at 1731789497531 (+5 ms)Closed at 1731789497531 2024-11-16T20:38:17,533 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:17,534 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=ec472999ca7ae234bff5f83872160c17, regionState=CLOSED 2024-11-16T20:38:17,536 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure ec472999ca7ae234bff5f83872160c17, server=40c018648b21,43411,1731789471222 because future has completed 2024-11-16T20:38:17,540 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-16T20:38:17,540 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure ec472999ca7ae234bff5f83872160c17, server=40c018648b21,43411,1731789471222 in 205 msec 2024-11-16T20:38:17,542 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=8, resume processing ppid=7 2024-11-16T20:38:17,542 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ec472999ca7ae234bff5f83872160c17, UNASSIGN in 213 msec 2024-11-16T20:38:17,551 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:17,555 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=ec472999ca7ae234bff5f83872160c17, threads=4 2024-11-16T20:38:17,557 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/f07bc69420ab4bf49fec50aaa07fddc5 for region: ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:17,558 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/f40e67c765794f5c9aac797adf80964b for region: ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:17,558 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/61f4ea80bf044926a61a65d26772c65d for region: ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:17,558 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/adf6f2f98ccf48c4beb848e39355383d for region: ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:17,568 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/f40e67c765794f5c9aac797adf80964b, top=true 2024-11-16T20:38:17,575 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/61f4ea80bf044926a61a65d26772c65d, top=true 2024-11-16T20:38:17,575 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/f07bc69420ab4bf49fec50aaa07fddc5, top=true 2024-11-16T20:38:17,580 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created 
linkFile:hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-f40e67c765794f5c9aac797adf80964b for child: 0a3be2714e6da9eb925be22aead0c4a5, parent: ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:17,580 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/f40e67c765794f5c9aac797adf80964b for region: ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:17,581 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-61f4ea80bf044926a61a65d26772c65d for child: 0a3be2714e6da9eb925be22aead0c4a5, parent: ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:17,582 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/61f4ea80bf044926a61a65d26772c65d for region: ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:17,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741847_1023 (size=27) 2024-11-16T20:38:17,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741847_1023 (size=27) 2024-11-16T20:38:17,582 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-f07bc69420ab4bf49fec50aaa07fddc5 for child: 0a3be2714e6da9eb925be22aead0c4a5, parent: ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:17,582 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/f07bc69420ab4bf49fec50aaa07fddc5 for region: ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:17,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741848_1024 (size=27) 2024-11-16T20:38:17,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741848_1024 (size=27) 2024-11-16T20:38:17,596 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/adf6f2f98ccf48c4beb848e39355383d for region: ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:17,598 DEBUG [PEWorker-4 
{}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region ec472999ca7ae234bff5f83872160c17 Daughter A: [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/f1a3a111b221e74628f64224b1aaf85e/info/adf6f2f98ccf48c4beb848e39355383d.ec472999ca7ae234bff5f83872160c17] storefiles, Daughter B: [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-61f4ea80bf044926a61a65d26772c65d, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/adf6f2f98ccf48c4beb848e39355383d.ec472999ca7ae234bff5f83872160c17, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-f07bc69420ab4bf49fec50aaa07fddc5, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-f40e67c765794f5c9aac797adf80964b] storefiles. 2024-11-16T20:38:17,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741849_1025 (size=71) 2024-11-16T20:38:17,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741849_1025 (size=71) 2024-11-16T20:38:17,609 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:17,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741850_1026 (size=71) 2024-11-16T20:38:17,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741850_1026 (size=71) 2024-11-16T20:38:17,623 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:17,636 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/f1a3a111b221e74628f64224b1aaf85e/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-16T20:38:17,639 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-16T20:38:17,642 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731789497642"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731789497642"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731789497642"}]},"ts":"1731789497642"} 2024-11-16T20:38:17,643 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731789497642"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731789497642"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731789497642"}]},"ts":"1731789497642"} 2024-11-16T20:38:17,643 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731789497642"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731789497642"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731789497642"}]},"ts":"1731789497642"} 2024-11-16T20:38:17,663 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f1a3a111b221e74628f64224b1aaf85e, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0a3be2714e6da9eb925be22aead0c4a5, ASSIGN}] 2024-11-16T20:38:17,665 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0a3be2714e6da9eb925be22aead0c4a5, ASSIGN 2024-11-16T20:38:17,665 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f1a3a111b221e74628f64224b1aaf85e, ASSIGN 2024-11-16T20:38:17,666 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f1a3a111b221e74628f64224b1aaf85e, ASSIGN; state=SPLITTING_NEW, location=40c018648b21,43411,1731789471222; forceNewPlan=false, retain=false 2024-11-16T20:38:17,666 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0a3be2714e6da9eb925be22aead0c4a5, ASSIGN; state=SPLITTING_NEW, location=40c018648b21,43411,1731789471222; forceNewPlan=false, retain=false 2024-11-16T20:38:17,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:17,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:17,817 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=f1a3a111b221e74628f64224b1aaf85e, regionState=OPENING, regionLocation=40c018648b21,43411,1731789471222 2024-11-16T20:38:17,817 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=0a3be2714e6da9eb925be22aead0c4a5, regionState=OPENING, regionLocation=40c018648b21,43411,1731789471222 2024-11-16T20:38:17,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f1a3a111b221e74628f64224b1aaf85e, ASSIGN because future has completed 2024-11-16T20:38:17,819 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure f1a3a111b221e74628f64224b1aaf85e, server=40c018648b21,43411,1731789471222}] 2024-11-16T20:38:17,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0a3be2714e6da9eb925be22aead0c4a5, ASSIGN because future has completed 2024-11-16T20:38:17,820 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0a3be2714e6da9eb925be22aead0c4a5, server=40c018648b21,43411,1731789471222}] 2024-11-16T20:38:17,977 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. 
2024-11-16T20:38:17,977 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 0a3be2714e6da9eb925be22aead0c4a5, NAME => 'TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-16T20:38:17,978 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:17,978 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:38:17,978 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:17,978 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:17,981 INFO [StoreOpener-0a3be2714e6da9eb925be22aead0c4a5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:17,982 INFO [StoreOpener-0a3be2714e6da9eb925be22aead0c4a5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0a3be2714e6da9eb925be22aead0c4a5 columnFamilyName info 2024-11-16T20:38:17,983 DEBUG [StoreOpener-0a3be2714e6da9eb925be22aead0c4a5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:17,994 DEBUG [StoreOpener-0a3be2714e6da9eb925be22aead0c4a5-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-61f4ea80bf044926a61a65d26772c65d 2024-11-16T20:38:17,998 DEBUG [StoreOpener-0a3be2714e6da9eb925be22aead0c4a5-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-f07bc69420ab4bf49fec50aaa07fddc5 
2024-11-16T20:38:18,002 DEBUG [StoreOpener-0a3be2714e6da9eb925be22aead0c4a5-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-f40e67c765794f5c9aac797adf80964b 2024-11-16T20:38:18,008 DEBUG [StoreOpener-0a3be2714e6da9eb925be22aead0c4a5-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/adf6f2f98ccf48c4beb848e39355383d.ec472999ca7ae234bff5f83872160c17->hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/adf6f2f98ccf48c4beb848e39355383d-top 2024-11-16T20:38:18,009 INFO [StoreOpener-0a3be2714e6da9eb925be22aead0c4a5-1 {}] regionserver.HStore(327): Store=0a3be2714e6da9eb925be22aead0c4a5/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:38:18,009 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:18,010 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:18,011 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:18,012 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:18,012 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:18,014 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:18,014 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 0a3be2714e6da9eb925be22aead0c4a5; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=831505, jitterRate=0.05731363594532013}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T20:38:18,015 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:18,015 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 0a3be2714e6da9eb925be22aead0c4a5: Running 
coprocessor pre-open hook at 1731789497978Writing region info on filesystem at 1731789497978Initializing all the Stores at 1731789497980 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789497980Cleaning up temporary data from old regions at 1731789498012 (+32 ms)Running coprocessor post-open hooks at 1731789498015 (+3 ms)Region opened successfully at 1731789498015 2024-11-16T20:38:18,016 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5., pid=13, masterSystemTime=1731789497971 2024-11-16T20:38:18,016 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 0a3be2714e6da9eb925be22aead0c4a5:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T20:38:18,016 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:18,016 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-16T20:38:18,018 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. 2024-11-16T20:38:18,018 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1541): 0a3be2714e6da9eb925be22aead0c4a5/info is initiating minor compaction (all files) 2024-11-16T20:38:18,018 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0a3be2714e6da9eb925be22aead0c4a5/info in TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. 
2024-11-16T20:38:18,018 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/adf6f2f98ccf48c4beb848e39355383d.ec472999ca7ae234bff5f83872160c17->hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/adf6f2f98ccf48c4beb848e39355383d-top, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-f07bc69420ab4bf49fec50aaa07fddc5, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-f40e67c765794f5c9aac797adf80964b, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-61f4ea80bf044926a61a65d26772c65d] into tmpdir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp, totalSize=120.8 K 2024-11-16T20:38:18,018 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. 2024-11-16T20:38:18,018 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. 2024-11-16T20:38:18,019 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e. 
2024-11-16T20:38:18,019 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting adf6f2f98ccf48c4beb848e39355383d.ec472999ca7ae234bff5f83872160c17, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731789482591 2024-11-16T20:38:18,019 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => f1a3a111b221e74628f64224b1aaf85e, NAME => 'TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-16T20:38:18,019 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling f1a3a111b221e74628f64224b1aaf85e 2024-11-16T20:38:18,019 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:38:18,019 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-f07bc69420ab4bf49fec50aaa07fddc5, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1731789496819 2024-11-16T20:38:18,019 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for f1a3a111b221e74628f64224b1aaf85e 2024-11-16T20:38:18,019 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for f1a3a111b221e74628f64224b1aaf85e 2024-11-16T20:38:18,019 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=0a3be2714e6da9eb925be22aead0c4a5, regionState=OPEN, openSeqNum=127, regionLocation=40c018648b21,43411,1731789471222 2024-11-16T20:38:18,019 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-f40e67c765794f5c9aac797adf80964b, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1731789496851 2024-11-16T20:38:18,020 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-61f4ea80bf044926a61a65d26772c65d, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731789496890 2024-11-16T20:38:18,020 INFO [StoreOpener-f1a3a111b221e74628f64224b1aaf85e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f1a3a111b221e74628f64224b1aaf85e 2024-11-16T20:38:18,021 INFO [StoreOpener-f1a3a111b221e74628f64224b1aaf85e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f1a3a111b221e74628f64224b1aaf85e columnFamilyName info 2024-11-16T20:38:18,021 DEBUG [StoreOpener-f1a3a111b221e74628f64224b1aaf85e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:18,021 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-16T20:38:18,021 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-11-16T20:38:18,021 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-16T20:38:18,021 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0a3be2714e6da9eb925be22aead0c4a5, server=40c018648b21,43411,1731789471222 because future has completed 2024-11-16T20:38:18,025 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-16T20:38:18,026 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 0a3be2714e6da9eb925be22aead0c4a5, server=40c018648b21,43411,1731789471222 in 203 msec 2024-11-16T20:38:18,027 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0a3be2714e6da9eb925be22aead0c4a5, ASSIGN in 363 msec 2024-11-16T20:38:18,035 DEBUG [StoreOpener-f1a3a111b221e74628f64224b1aaf85e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/f1a3a111b221e74628f64224b1aaf85e/info/adf6f2f98ccf48c4beb848e39355383d.ec472999ca7ae234bff5f83872160c17->hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/adf6f2f98ccf48c4beb848e39355383d-bottom 2024-11-16T20:38:18,036 INFO [StoreOpener-f1a3a111b221e74628f64224b1aaf85e-1 {}] regionserver.HStore(327): Store=f1a3a111b221e74628f64224b1aaf85e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:38:18,036 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for f1a3a111b221e74628f64224b1aaf85e 2024-11-16T20:38:18,037 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/f1a3a111b221e74628f64224b1aaf85e 2024-11-16T20:38:18,038 DEBUG 
[RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/f1a3a111b221e74628f64224b1aaf85e 2024-11-16T20:38:18,038 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for f1a3a111b221e74628f64224b1aaf85e 2024-11-16T20:38:18,038 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for f1a3a111b221e74628f64224b1aaf85e 2024-11-16T20:38:18,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/.tmp/info/a8cd5bbf2535445cbf028910ac8d490a is 193, key is TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5./info:regioninfo/1731789498019/Put/seqid=0 2024-11-16T20:38:18,040 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for f1a3a111b221e74628f64224b1aaf85e 2024-11-16T20:38:18,041 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened f1a3a111b221e74628f64224b1aaf85e; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=770778, jitterRate=-0.019905805587768555}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T20:38:18,041 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f1a3a111b221e74628f64224b1aaf85e 2024-11-16T20:38:18,042 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for f1a3a111b221e74628f64224b1aaf85e: Running coprocessor pre-open hook at 1731789498019Writing region info on filesystem at 1731789498019Initializing all the Stores at 1731789498020 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789498020Cleaning up temporary data from old regions at 1731789498038 (+18 ms)Running coprocessor post-open hooks at 1731789498041 (+3 ms)Region opened successfully at 1731789498042 (+1 ms) 2024-11-16T20:38:18,043 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e., pid=12, masterSystemTime=1731789497971 2024-11-16T20:38:18,043 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store f1a3a111b221e74628f64224b1aaf85e:info, priority=-2147483648, current under compaction store size is 2 2024-11-16T20:38:18,043 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:18,043 DEBUG [RS:0;40c018648b21:43411-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-16T20:38:18,043 INFO [RS:0;40c018648b21:43411-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e. 2024-11-16T20:38:18,044 DEBUG [RS:0;40c018648b21:43411-longCompactions-0 {}] regionserver.HStore(1541): f1a3a111b221e74628f64224b1aaf85e/info is initiating minor compaction (all files) 2024-11-16T20:38:18,044 INFO [RS:0;40c018648b21:43411-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f1a3a111b221e74628f64224b1aaf85e/info in TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e. 2024-11-16T20:38:18,044 INFO [RS:0;40c018648b21:43411-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/f1a3a111b221e74628f64224b1aaf85e/info/adf6f2f98ccf48c4beb848e39355383d.ec472999ca7ae234bff5f83872160c17->hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/adf6f2f98ccf48c4beb848e39355383d-bottom] into tmpdir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/f1a3a111b221e74628f64224b1aaf85e/.tmp, totalSize=73.6 K 2024-11-16T20:38:18,044 DEBUG [RS:0;40c018648b21:43411-longCompactions-0 {}] compactions.Compactor(225): Compacting adf6f2f98ccf48c4beb848e39355383d.ec472999ca7ae234bff5f83872160c17, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731789482591 2024-11-16T20:38:18,045 DEBUG [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e. 2024-11-16T20:38:18,045 INFO [RS_OPEN_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e. 
2024-11-16T20:38:18,047 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=f1a3a111b221e74628f64224b1aaf85e, regionState=OPEN, openSeqNum=127, regionLocation=40c018648b21,43411,1731789471222 2024-11-16T20:38:18,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741851_1027 (size=9882) 2024-11-16T20:38:18,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741851_1027 (size=9882) 2024-11-16T20:38:18,050 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/.tmp/info/a8cd5bbf2535445cbf028910ac8d490a 2024-11-16T20:38:18,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure f1a3a111b221e74628f64224b1aaf85e, server=40c018648b21,43411,1731789471222 because future has completed 2024-11-16T20:38:18,053 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a3be2714e6da9eb925be22aead0c4a5#info#compaction#66 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T20:38:18,054 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/b8a82a341a6d4e55875487a371e6ba39 is 1080, key is row0062/info:/1731789494809/Put/seqid=0 2024-11-16T20:38:18,056 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-16T20:38:18,056 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure f1a3a111b221e74628f64224b1aaf85e, server=40c018648b21,43411,1731789471222 in 234 msec 2024-11-16T20:38:18,059 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-16T20:38:18,059 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f1a3a111b221e74628f64224b1aaf85e, ASSIGN in 393 msec 2024-11-16T20:38:18,060 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ec472999ca7ae234bff5f83872160c17, daughterA=f1a3a111b221e74628f64224b1aaf85e, daughterB=0a3be2714e6da9eb925be22aead0c4a5 in 743 msec 2024-11-16T20:38:18,066 INFO [RS:0;40c018648b21:43411-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f1a3a111b221e74628f64224b1aaf85e#info#compaction#67 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T20:38:18,067 DEBUG [RS:0;40c018648b21:43411-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/f1a3a111b221e74628f64224b1aaf85e/.tmp/info/35f4f48fd5444e0bbacd2757fde6c64c is 1080, key is row0001/info:/1731789482591/Put/seqid=0 2024-11-16T20:38:18,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741852_1028 (size=43081) 2024-11-16T20:38:18,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741852_1028 (size=43081) 2024-11-16T20:38:18,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741853_1029 (size=70862) 2024-11-16T20:38:18,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741853_1029 (size=70862) 2024-11-16T20:38:18,076 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/b8a82a341a6d4e55875487a371e6ba39 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/b8a82a341a6d4e55875487a371e6ba39 2024-11-16T20:38:18,078 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/.tmp/ns/d409e659f6bc4d31a6b810e9d7a56f4b is 43, key is default/ns:d/1731789472527/Put/seqid=0 2024-11-16T20:38:18,082 DEBUG [RS:0;40c018648b21:43411-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/f1a3a111b221e74628f64224b1aaf85e/.tmp/info/35f4f48fd5444e0bbacd2757fde6c64c as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/f1a3a111b221e74628f64224b1aaf85e/info/35f4f48fd5444e0bbacd2757fde6c64c 2024-11-16T20:38:18,083 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 0a3be2714e6da9eb925be22aead0c4a5/info of 0a3be2714e6da9eb925be22aead0c4a5 into b8a82a341a6d4e55875487a371e6ba39(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-16T20:38:18,083 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:18,083 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5., storeName=0a3be2714e6da9eb925be22aead0c4a5/info, priority=12, startTime=1731789498016; duration=0sec 2024-11-16T20:38:18,083 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:18,083 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a3be2714e6da9eb925be22aead0c4a5:info 2024-11-16T20:38:18,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741854_1030 (size=5153) 2024-11-16T20:38:18,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741854_1030 (size=5153) 2024-11-16T20:38:18,086 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/.tmp/ns/d409e659f6bc4d31a6b810e9d7a56f4b 2024-11-16T20:38:18,087 INFO [RS:0;40c018648b21:43411-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in f1a3a111b221e74628f64224b1aaf85e/info of f1a3a111b221e74628f64224b1aaf85e into 35f4f48fd5444e0bbacd2757fde6c64c(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-16T20:38:18,087 DEBUG [RS:0;40c018648b21:43411-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f1a3a111b221e74628f64224b1aaf85e: 2024-11-16T20:38:18,087 INFO [RS:0;40c018648b21:43411-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e., storeName=f1a3a111b221e74628f64224b1aaf85e/info, priority=15, startTime=1731789498043; duration=0sec 2024-11-16T20:38:18,087 DEBUG [RS:0;40c018648b21:43411-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:18,087 DEBUG [RS:0;40c018648b21:43411-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f1a3a111b221e74628f64224b1aaf85e:info 2024-11-16T20:38:18,103 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/.tmp/table/c840b57215a94413a68f2f369d04a60c is 65, key is TestLogRolling-testLogRolling/table:state/1731789472933/Put/seqid=0 2024-11-16T20:38:18,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741855_1031 (size=5340) 2024-11-16T20:38:18,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741855_1031 (size=5340) 2024-11-16T20:38:18,107 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/.tmp/table/c840b57215a94413a68f2f369d04a60c 2024-11-16T20:38:18,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/.tmp/info/a8cd5bbf2535445cbf028910ac8d490a as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/info/a8cd5bbf2535445cbf028910ac8d490a 2024-11-16T20:38:18,118 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/info/a8cd5bbf2535445cbf028910ac8d490a, entries=30, sequenceid=17, filesize=9.7 K 2024-11-16T20:38:18,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/.tmp/ns/d409e659f6bc4d31a6b810e9d7a56f4b as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/ns/d409e659f6bc4d31a6b810e9d7a56f4b 2024-11-16T20:38:18,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/ns/d409e659f6bc4d31a6b810e9d7a56f4b, entries=2, sequenceid=17, filesize=5.0 K 2024-11-16T20:38:18,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/.tmp/table/c840b57215a94413a68f2f369d04a60c as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/table/c840b57215a94413a68f2f369d04a60c 2024-11-16T20:38:18,132 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/table/c840b57215a94413a68f2f369d04a60c, entries=2, sequenceid=17, filesize=5.2 K 2024-11-16T20:38:18,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 112ms, sequenceid=17, compaction requested=false 2024-11-16T20:38:18,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T20:38:18,795 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:38:18,795 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:18,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:38334 deadline: 1731789508906, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. is not online on 40c018648b21,43411,1731789471222 2024-11-16T20:38:18,908 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17., hostname=40c018648b21,43411,1731789471222, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17., hostname=40c018648b21,43411,1731789471222, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. 
is not online on 40c018648b21,43411,1731789471222 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T20:38:18,908 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17., hostname=40c018648b21,43411,1731789471222, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17. is not online on 40c018648b21,43411,1731789471222 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T20:38:18,908 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731789472562.ec472999ca7ae234bff5f83872160c17., hostname=40c018648b21,43411,1731789471222, seqNum=2 from cache 2024-11-16T20:38:19,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:19,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:38:20,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:20,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:21,022 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T20:38:21,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:21,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:22,532 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,532 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,532 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,532 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,532 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,534 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,557 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,561 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,561 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,562 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,564 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:22,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:22,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:23,073 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T20:38:23,075 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,075 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,075 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,078 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,078 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,101 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,102 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,102 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,102 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,102 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,102 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,106 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,106 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,106 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,108 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:23,800 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:38:23,800 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:24,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:24,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:38:25,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:25,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:26,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:38:26,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:27,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:27,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:38:28,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:28,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:28,948 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5., hostname=40c018648b21,43411,1731789471222, seqNum=127] 2024-11-16T20:38:28,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:28,961 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0a3be2714e6da9eb925be22aead0c4a5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T20:38:28,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/c8f8857fe7bf46d8bc7d60b0362faded is 1080, key is row0097/info:/1731789508949/Put/seqid=0 2024-11-16T20:38:28,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741856_1032 (size=12516) 2024-11-16T20:38:28,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741856_1032 (size=12516) 2024-11-16T20:38:28,971 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/c8f8857fe7bf46d8bc7d60b0362faded 2024-11-16T20:38:28,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/c8f8857fe7bf46d8bc7d60b0362faded as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/c8f8857fe7bf46d8bc7d60b0362faded 2024-11-16T20:38:28,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/c8f8857fe7bf46d8bc7d60b0362faded, entries=7, sequenceid=137, filesize=12.2 K 2024-11-16T20:38:28,985 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 0a3be2714e6da9eb925be22aead0c4a5 in 24ms, sequenceid=137, compaction requested=false 2024-11-16T20:38:28,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:28,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:28,987 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0a3be2714e6da9eb925be22aead0c4a5 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T20:38:28,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/78c981b06f5b41679837b947f241c31f is 1080, key is row0104/info:/1731789508962/Put/seqid=0 2024-11-16T20:38:28,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741857_1033 (size=17906) 2024-11-16T20:38:29,000 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/78c981b06f5b41679837b947f241c31f 2024-11-16T20:38:29,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741857_1033 (size=17906) 2024-11-16T20:38:29,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/78c981b06f5b41679837b947f241c31f as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/78c981b06f5b41679837b947f241c31f 2024-11-16T20:38:29,012 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/78c981b06f5b41679837b947f241c31f, entries=12, sequenceid=152, filesize=17.5 K 2024-11-16T20:38:29,013 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 0a3be2714e6da9eb925be22aead0c4a5 in 26ms, sequenceid=152, compaction requested=true 2024-11-16T20:38:29,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:29,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a3be2714e6da9eb925be22aead0c4a5:info, priority=-2147483648, current under compaction store size is 1 
2024-11-16T20:38:29,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:29,013 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T20:38:29,014 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 73503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T20:38:29,014 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1541): 0a3be2714e6da9eb925be22aead0c4a5/info is initiating minor compaction (all files) 2024-11-16T20:38:29,014 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0a3be2714e6da9eb925be22aead0c4a5/info in TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. 2024-11-16T20:38:29,015 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/b8a82a341a6d4e55875487a371e6ba39, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/c8f8857fe7bf46d8bc7d60b0362faded, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/78c981b06f5b41679837b947f241c31f] into tmpdir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp, totalSize=71.8 K 2024-11-16T20:38:29,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:29,015 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0a3be2714e6da9eb925be22aead0c4a5 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T20:38:29,015 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting b8a82a341a6d4e55875487a371e6ba39, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731789494809 2024-11-16T20:38:29,015 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting c8f8857fe7bf46d8bc7d60b0362faded, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731789508949 2024-11-16T20:38:29,016 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting 78c981b06f5b41679837b947f241c31f, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1731789508962 2024-11-16T20:38:29,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/f561bc04a953458fa7ac4e2a08fc00a9 is 1080, key is row0116/info:/1731789508988/Put/seqid=0 
2024-11-16T20:38:29,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741858_1034 (size=17906) 2024-11-16T20:38:29,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741858_1034 (size=17906) 2024-11-16T20:38:29,023 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/f561bc04a953458fa7ac4e2a08fc00a9 2024-11-16T20:38:29,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/f561bc04a953458fa7ac4e2a08fc00a9 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/f561bc04a953458fa7ac4e2a08fc00a9 2024-11-16T20:38:29,030 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a3be2714e6da9eb925be22aead0c4a5#info#compaction#73 average throughput is 27.71 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T20:38:29,031 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/8eded2fc4d8c46779f5b9ca76d5bfe83 is 1080, key is row0062/info:/1731789494809/Put/seqid=0 2024-11-16T20:38:29,034 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/f561bc04a953458fa7ac4e2a08fc00a9, entries=12, sequenceid=167, filesize=17.5 K 2024-11-16T20:38:29,035 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for 0a3be2714e6da9eb925be22aead0c4a5 in 20ms, sequenceid=167, compaction requested=false 2024-11-16T20:38:29,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:29,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741859_1035 (size=63733) 2024-11-16T20:38:29,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741859_1035 (size=63733) 2024-11-16T20:38:29,044 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/8eded2fc4d8c46779f5b9ca76d5bfe83 as 
hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8eded2fc4d8c46779f5b9ca76d5bfe83 2024-11-16T20:38:29,050 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0a3be2714e6da9eb925be22aead0c4a5/info of 0a3be2714e6da9eb925be22aead0c4a5 into 8eded2fc4d8c46779f5b9ca76d5bfe83(size=62.2 K), total size for store is 79.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T20:38:29,050 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:29,050 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5., storeName=0a3be2714e6da9eb925be22aead0c4a5/info, priority=13, startTime=1731789509013; duration=0sec 2024-11-16T20:38:29,050 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:29,050 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a3be2714e6da9eb925be22aead0c4a5:info 2024-11-16T20:38:29,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:29,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:30,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:30,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:31,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:31,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0a3be2714e6da9eb925be22aead0c4a5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T20:38:31,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/7626fbc2ce724419b8e323e84f2a63db is 1080, key is row0128/info:/1731789509016/Put/seqid=0 2024-11-16T20:38:31,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741860_1036 (size=12516) 2024-11-16T20:38:31,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741860_1036 (size=12516) 2024-11-16T20:38:31,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0a3be2714e6da9eb925be22aead0c4a5, server=40c018648b21,43411,1731789471222 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-16T20:38:31,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:38334 deadline: 1731789521074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0a3be2714e6da9eb925be22aead0c4a5, server=40c018648b21,43411,1731789471222 2024-11-16T20:38:31,075 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5., hostname=40c018648b21,43411,1731789471222, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5., hostname=40c018648b21,43411,1731789471222, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0a3be2714e6da9eb925be22aead0c4a5, server=40c018648b21,43411,1731789471222 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T20:38:31,075 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5., hostname=40c018648b21,43411,1731789471222, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0a3be2714e6da9eb925be22aead0c4a5, server=40c018648b21,43411,1731789471222 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T20:38:31,075 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5., hostname=40c018648b21,43411,1731789471222, seqNum=127 because the exception is null or not the one we care about 2024-11-16T20:38:31,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/7626fbc2ce724419b8e323e84f2a63db 2024-11-16T20:38:31,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/7626fbc2ce724419b8e323e84f2a63db as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/7626fbc2ce724419b8e323e84f2a63db 2024-11-16T20:38:31,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/7626fbc2ce724419b8e323e84f2a63db, entries=7, sequenceid=178, filesize=12.2 K 2024-11-16T20:38:31,472 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 0a3be2714e6da9eb925be22aead0c4a5 in 434ms, sequenceid=178, compaction requested=true 2024-11-16T20:38:31,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:31,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a3be2714e6da9eb925be22aead0c4a5:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T20:38:31,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:31,472 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T20:38:31,473 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94155 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T20:38:31,474 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1541): 0a3be2714e6da9eb925be22aead0c4a5/info is initiating minor compaction (all files) 2024-11-16T20:38:31,474 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
0a3be2714e6da9eb925be22aead0c4a5/info in TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. 2024-11-16T20:38:31,474 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8eded2fc4d8c46779f5b9ca76d5bfe83, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/f561bc04a953458fa7ac4e2a08fc00a9, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/7626fbc2ce724419b8e323e84f2a63db] into tmpdir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp, totalSize=91.9 K 2024-11-16T20:38:31,474 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8eded2fc4d8c46779f5b9ca76d5bfe83, keycount=54, bloomtype=ROW, size=62.2 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1731789494809 2024-11-16T20:38:31,474 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting f561bc04a953458fa7ac4e2a08fc00a9, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1731789508988 2024-11-16T20:38:31,475 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7626fbc2ce724419b8e323e84f2a63db, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1731789509016 2024-11-16T20:38:31,496 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a3be2714e6da9eb925be22aead0c4a5#info#compaction#75 average throughput is 37.45 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T20:38:31,497 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/e044e11d3b8e4b6cb92ba3e08dc41eeb is 1080, key is row0062/info:/1731789494809/Put/seqid=0 2024-11-16T20:38:31,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741861_1037 (size=84390) 2024-11-16T20:38:31,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741861_1037 (size=84390) 2024-11-16T20:38:31,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:31,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:31,914 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/e044e11d3b8e4b6cb92ba3e08dc41eeb as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/e044e11d3b8e4b6cb92ba3e08dc41eeb 2024-11-16T20:38:31,920 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0a3be2714e6da9eb925be22aead0c4a5/info of 0a3be2714e6da9eb925be22aead0c4a5 into e044e11d3b8e4b6cb92ba3e08dc41eeb(size=82.4 K), total size for store is 82.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T20:38:31,920 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:31,920 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5., storeName=0a3be2714e6da9eb925be22aead0c4a5/info, priority=13, startTime=1731789511472; duration=0sec 2024-11-16T20:38:31,921 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:31,921 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a3be2714e6da9eb925be22aead0c4a5:info 2024-11-16T20:38:32,601 INFO [master/40c018648b21:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T20:38:32,601 INFO [master/40c018648b21:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-16T20:38:32,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
[... remainder of this stack trace is identical to the occurrences above. The same Close-WAL-Writer-0 WARN util.RecoverLeaseFSUtils(258) "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" keeps repeating roughly once per second, alternating between hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta and hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297, from 2024-11-16T20:38:32,811 through 2024-11-16T20:38:40,821, where the excerpt is cut off mid-trace. The only other record in that window is the following ...]
2024-11-16T20:38:37,459 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375
11 more 2024-11-16T20:38:41,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:41,118 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0a3be2714e6da9eb925be22aead0c4a5 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-16T20:38:41,128 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/8fa426acc999460a9b4173c7bc90a992 is 1080, key is row0135/info:/1731789511040/Put/seqid=0 2024-11-16T20:38:41,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741862_1038 (size=29784) 2024-11-16T20:38:41,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741862_1038 (size=29784) 2024-11-16T20:38:41,136 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/8fa426acc999460a9b4173c7bc90a992 2024-11-16T20:38:41,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/8fa426acc999460a9b4173c7bc90a992 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8fa426acc999460a9b4173c7bc90a992 2024-11-16T20:38:41,145 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8fa426acc999460a9b4173c7bc90a992, entries=23, sequenceid=205, filesize=29.1 K 2024-11-16T20:38:41,146 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=3.15 KB/3228 for 0a3be2714e6da9eb925be22aead0c4a5 in 28ms, sequenceid=205, compaction requested=false 2024-11-16T20:38:41,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:41,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:41,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:42,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:42,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:43,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:43,146 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0a3be2714e6da9eb925be22aead0c4a5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T20:38:43,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/8e7ce8b9f86e47e0b4f465f8eac40153 is 1080, key is row0158/info:/1731789521122/Put/seqid=0 2024-11-16T20:38:43,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741863_1039 (size=12516) 2024-11-16T20:38:43,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741863_1039 (size=12516) 2024-11-16T20:38:43,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/8e7ce8b9f86e47e0b4f465f8eac40153 2024-11-16T20:38:43,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/8e7ce8b9f86e47e0b4f465f8eac40153 as 
hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8e7ce8b9f86e47e0b4f465f8eac40153 2024-11-16T20:38:43,171 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8e7ce8b9f86e47e0b4f465f8eac40153, entries=7, sequenceid=215, filesize=12.2 K 2024-11-16T20:38:43,172 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 0a3be2714e6da9eb925be22aead0c4a5 in 26ms, sequenceid=215, compaction requested=true 2024-11-16T20:38:43,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:43,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a3be2714e6da9eb925be22aead0c4a5:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T20:38:43,172 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T20:38:43,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:43,173 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 126690 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T20:38:43,173 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1541): 0a3be2714e6da9eb925be22aead0c4a5/info is initiating minor compaction (all files) 2024-11-16T20:38:43,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:43,173 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0a3be2714e6da9eb925be22aead0c4a5/info in TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. 
2024-11-16T20:38:43,173 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0a3be2714e6da9eb925be22aead0c4a5 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T20:38:43,173 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/e044e11d3b8e4b6cb92ba3e08dc41eeb, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8fa426acc999460a9b4173c7bc90a992, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8e7ce8b9f86e47e0b4f465f8eac40153] into tmpdir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp, totalSize=123.7 K 2024-11-16T20:38:43,174 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting e044e11d3b8e4b6cb92ba3e08dc41eeb, keycount=73, bloomtype=ROW, size=82.4 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1731789494809 2024-11-16T20:38:43,174 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8fa426acc999460a9b4173c7bc90a992, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1731789511040 2024-11-16T20:38:43,174 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8e7ce8b9f86e47e0b4f465f8eac40153, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1731789521122 2024-11-16T20:38:43,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/a6cd91023ce648099615a0e4bc3e88f2 is 1080, key is row0165/info:/1731789523147/Put/seqid=0 2024-11-16T20:38:43,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741864_1040 (size=16828) 2024-11-16T20:38:43,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741864_1040 (size=16828) 2024-11-16T20:38:43,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/a6cd91023ce648099615a0e4bc3e88f2 2024-11-16T20:38:43,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/a6cd91023ce648099615a0e4bc3e88f2 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/a6cd91023ce648099615a0e4bc3e88f2 2024-11-16T20:38:43,196 INFO [RS:0;40c018648b21:43411-shortCompactions-0 
{}] throttle.PressureAwareThroughputController(145): 0a3be2714e6da9eb925be22aead0c4a5#info#compaction#79 average throughput is 52.85 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T20:38:43,196 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/8b860031812d4bdeb18d019d27a81da2 is 1080, key is row0062/info:/1731789494809/Put/seqid=0 2024-11-16T20:38:43,197 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/a6cd91023ce648099615a0e4bc3e88f2, entries=11, sequenceid=229, filesize=16.4 K 2024-11-16T20:38:43,198 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=12.61 KB/12912 for 0a3be2714e6da9eb925be22aead0c4a5 in 25ms, sequenceid=229, compaction requested=false 2024-11-16T20:38:43,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:43,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:43,199 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0a3be2714e6da9eb925be22aead0c4a5 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-16T20:38:43,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741865_1041 (size=116840) 2024-11-16T20:38:43,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741865_1041 (size=116840) 2024-11-16T20:38:43,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/9d779b5601e1428d86cdcbcec2bbbf08 is 1080, key is row0176/info:/1731789523174/Put/seqid=0 2024-11-16T20:38:43,206 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/8b860031812d4bdeb18d019d27a81da2 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8b860031812d4bdeb18d019d27a81da2 2024-11-16T20:38:43,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741866_1042 (size=19000) 2024-11-16T20:38:43,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741866_1042 (size=19000) 2024-11-16T20:38:43,208 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=245 (bloomFilter=true), 
to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/9d779b5601e1428d86cdcbcec2bbbf08 2024-11-16T20:38:43,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/9d779b5601e1428d86cdcbcec2bbbf08 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/9d779b5601e1428d86cdcbcec2bbbf08 2024-11-16T20:38:43,213 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0a3be2714e6da9eb925be22aead0c4a5/info of 0a3be2714e6da9eb925be22aead0c4a5 into 8b860031812d4bdeb18d019d27a81da2(size=114.1 K), total size for store is 130.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T20:38:43,213 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:43,213 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5., storeName=0a3be2714e6da9eb925be22aead0c4a5/info, priority=13, startTime=1731789523172; duration=0sec 2024-11-16T20:38:43,213 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:43,213 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a3be2714e6da9eb925be22aead0c4a5:info 2024-11-16T20:38:43,217 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/9d779b5601e1428d86cdcbcec2bbbf08, entries=13, sequenceid=245, filesize=18.6 K 2024-11-16T20:38:43,218 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=4.20 KB/4304 for 0a3be2714e6da9eb925be22aead0c4a5 in 19ms, sequenceid=245, compaction requested=true 2024-11-16T20:38:43,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:43,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a3be2714e6da9eb925be22aead0c4a5:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T20:38:43,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:43,218 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T20:38:43,219 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of 
size 152668 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T20:38:43,219 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1541): 0a3be2714e6da9eb925be22aead0c4a5/info is initiating minor compaction (all files) 2024-11-16T20:38:43,219 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0a3be2714e6da9eb925be22aead0c4a5/info in TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. 2024-11-16T20:38:43,219 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8b860031812d4bdeb18d019d27a81da2, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/a6cd91023ce648099615a0e4bc3e88f2, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/9d779b5601e1428d86cdcbcec2bbbf08] into tmpdir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp, totalSize=149.1 K 2024-11-16T20:38:43,219 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8b860031812d4bdeb18d019d27a81da2, keycount=103, bloomtype=ROW, size=114.1 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1731789494809 2024-11-16T20:38:43,220 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting a6cd91023ce648099615a0e4bc3e88f2, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1731789523147 2024-11-16T20:38:43,220 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9d779b5601e1428d86cdcbcec2bbbf08, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1731789523174 2024-11-16T20:38:43,230 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a3be2714e6da9eb925be22aead0c4a5#info#compaction#81 average throughput is 65.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T20:38:43,231 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/702ab19114174e9cbe6fa6145d904fcd is 1080, key is row0062/info:/1731789494809/Put/seqid=0 2024-11-16T20:38:43,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741867_1043 (size=143019) 2024-11-16T20:38:43,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741867_1043 (size=143019) 2024-11-16T20:38:43,239 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/702ab19114174e9cbe6fa6145d904fcd as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/702ab19114174e9cbe6fa6145d904fcd 2024-11-16T20:38:43,245 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0a3be2714e6da9eb925be22aead0c4a5/info of 0a3be2714e6da9eb925be22aead0c4a5 into 702ab19114174e9cbe6fa6145d904fcd(size=139.7 K), total size for store is 139.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T20:38:43,245 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:43,245 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5., storeName=0a3be2714e6da9eb925be22aead0c4a5/info, priority=13, startTime=1731789523218; duration=0sec 2024-11-16T20:38:43,245 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:43,245 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a3be2714e6da9eb925be22aead0c4a5:info 2024-11-16T20:38:43,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:43,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:44,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:44,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:45,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:45,221 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0a3be2714e6da9eb925be22aead0c4a5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T20:38:45,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/c95ea814b4e44804b0cec432b55e57a6 is 1080, key is row0189/info:/1731789523201/Put/seqid=0 2024-11-16T20:38:45,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741868_1044 (size=12517) 2024-11-16T20:38:45,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741868_1044 (size=12517) 2024-11-16T20:38:45,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/c95ea814b4e44804b0cec432b55e57a6 2024-11-16T20:38:45,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/c95ea814b4e44804b0cec432b55e57a6 as 
hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/c95ea814b4e44804b0cec432b55e57a6 2024-11-16T20:38:45,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/c95ea814b4e44804b0cec432b55e57a6, entries=7, sequenceid=257, filesize=12.2 K 2024-11-16T20:38:45,241 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 0a3be2714e6da9eb925be22aead0c4a5 in 20ms, sequenceid=257, compaction requested=false 2024-11-16T20:38:45,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:45,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:45,242 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0a3be2714e6da9eb925be22aead0c4a5 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T20:38:45,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/6d6dad7a1c5a4cb59aca9cc6ddc90ad0 is 1080, key is row0196/info:/1731789525223/Put/seqid=0 2024-11-16T20:38:45,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741869_1045 (size=16839) 2024-11-16T20:38:45,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741869_1045 (size=16839) 2024-11-16T20:38:45,251 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/6d6dad7a1c5a4cb59aca9cc6ddc90ad0 2024-11-16T20:38:45,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/6d6dad7a1c5a4cb59aca9cc6ddc90ad0 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/6d6dad7a1c5a4cb59aca9cc6ddc90ad0 2024-11-16T20:38:45,262 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/6d6dad7a1c5a4cb59aca9cc6ddc90ad0, entries=11, sequenceid=271, filesize=16.4 K 2024-11-16T20:38:45,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for 0a3be2714e6da9eb925be22aead0c4a5 in 21ms, sequenceid=271, compaction requested=true 2024-11-16T20:38:45,263 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:45,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a3be2714e6da9eb925be22aead0c4a5:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T20:38:45,263 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T20:38:45,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:45,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:45,264 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0a3be2714e6da9eb925be22aead0c4a5 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T20:38:45,264 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 172375 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T20:38:45,264 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1541): 0a3be2714e6da9eb925be22aead0c4a5/info is initiating minor compaction (all files) 2024-11-16T20:38:45,264 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0a3be2714e6da9eb925be22aead0c4a5/info in TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. 2024-11-16T20:38:45,264 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/702ab19114174e9cbe6fa6145d904fcd, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/c95ea814b4e44804b0cec432b55e57a6, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/6d6dad7a1c5a4cb59aca9cc6ddc90ad0] into tmpdir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp, totalSize=168.3 K 2024-11-16T20:38:45,265 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting 702ab19114174e9cbe6fa6145d904fcd, keycount=127, bloomtype=ROW, size=139.7 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1731789494809 2024-11-16T20:38:45,265 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting c95ea814b4e44804b0cec432b55e57a6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1731789523201 2024-11-16T20:38:45,266 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6d6dad7a1c5a4cb59aca9cc6ddc90ad0, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1731789525223 2024-11-16T20:38:45,268 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/53241e3870f04076b4c28ffa53fbccd6 is 1080, key is row0207/info:/1731789525243/Put/seqid=0 2024-11-16T20:38:45,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741870_1046 (size=17918) 2024-11-16T20:38:45,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741870_1046 (size=17918) 2024-11-16T20:38:45,274 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/53241e3870f04076b4c28ffa53fbccd6 2024-11-16T20:38:45,280 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a3be2714e6da9eb925be22aead0c4a5#info#compaction#85 average throughput is 37.20 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T20:38:45,280 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/53241e3870f04076b4c28ffa53fbccd6 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/53241e3870f04076b4c28ffa53fbccd6 2024-11-16T20:38:45,281 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/a7da3e473e984bdf8594bf36660d46fd is 1080, key is row0062/info:/1731789494809/Put/seqid=0 2024-11-16T20:38:45,285 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/53241e3870f04076b4c28ffa53fbccd6, entries=12, sequenceid=286, filesize=17.5 K 2024-11-16T20:38:45,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741871_1047 (size=162541) 2024-11-16T20:38:45,286 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=6.30 KB/6456 for 0a3be2714e6da9eb925be22aead0c4a5 in 22ms, sequenceid=286, compaction requested=false 2024-11-16T20:38:45,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741871_1047 (size=162541) 2024-11-16T20:38:45,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:45,291 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/a7da3e473e984bdf8594bf36660d46fd as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/a7da3e473e984bdf8594bf36660d46fd 2024-11-16T20:38:45,296 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0a3be2714e6da9eb925be22aead0c4a5/info of 0a3be2714e6da9eb925be22aead0c4a5 into a7da3e473e984bdf8594bf36660d46fd(size=158.7 K), total size for store is 176.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T20:38:45,296 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:45,296 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5., storeName=0a3be2714e6da9eb925be22aead0c4a5/info, priority=13, startTime=1731789525263; duration=0sec 2024-11-16T20:38:45,296 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:45,296 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a3be2714e6da9eb925be22aead0c4a5:info 2024-11-16T20:38:45,670 DEBUG [master/40c018648b21:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=70, reuseRatio=88.61% 2024-11-16T20:38:45,670 DEBUG [master/40c018648b21:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-16T20:38:45,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:45,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:38:46,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:46,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:47,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:47,320 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0a3be2714e6da9eb925be22aead0c4a5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T20:38:47,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/1a9f85328a4d43698d8a491be58200b1 is 1080, key is row0219/info:/1731789525265/Put/seqid=0 2024-11-16T20:38:47,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741872_1048 (size=12523) 2024-11-16T20:38:47,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741872_1048 (size=12523) 2024-11-16T20:38:47,334 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/1a9f85328a4d43698d8a491be58200b1 2024-11-16T20:38:47,340 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/1a9f85328a4d43698d8a491be58200b1 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/1a9f85328a4d43698d8a491be58200b1 2024-11-16T20:38:47,347 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/1a9f85328a4d43698d8a491be58200b1, entries=7, sequenceid=297, filesize=12.2 K 2024-11-16T20:38:47,348 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 
KB/11836 for 0a3be2714e6da9eb925be22aead0c4a5 in 28ms, sequenceid=297, compaction requested=true 2024-11-16T20:38:47,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:47,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a3be2714e6da9eb925be22aead0c4a5:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T20:38:47,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:47,348 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T20:38:47,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:47,348 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0a3be2714e6da9eb925be22aead0c4a5 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T20:38:47,350 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 192982 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T20:38:47,350 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1541): 0a3be2714e6da9eb925be22aead0c4a5/info is initiating minor compaction (all files) 2024-11-16T20:38:47,350 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0a3be2714e6da9eb925be22aead0c4a5/info in TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. 
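The compaction entries just above are the interesting part of this run: SortedCompactionPolicy finds 3 eligible store files and ExploringCompactionPolicy reports that it selected all 3 (total size 192982) after considering a single permutation. A rough, self-contained sketch of that window enumeration follows; the minFiles/maxFiles values and class name are assumptions for illustration, not the real HBase policy code.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustrative sketch only: enumerate contiguous windows of candidate store files
// (the "permutations" the log counts) and pick one. This mirrors the shape of an
// exploring-style selection, not HBase's actual ExploringCompactionPolicy code;
// the minFiles/maxFiles values are assumptions.
public class CompactionWindowSketch {

    static List<List<Long>> windows(List<Long> sizes, int minFiles, int maxFiles) {
        List<List<Long>> result = new ArrayList<>();
        for (int start = 0; start < sizes.size(); start++) {
            for (int end = start + minFiles; end <= Math.min(sizes.size(), start + maxFiles); end++) {
                result.add(sizes.subList(start, end));
            }
        }
        return result;
    }

    public static void main(String[] args) {
        // Sizes close to the three files in the log entries above: ~158.7 K, ~17.5 K, ~12.2 K.
        List<Long> sizes = Arrays.asList(162_541L, 17_918L, 12_523L);
        List<List<Long>> candidates = windows(sizes, 3, 10);
        System.out.println("Permutations considered: " + candidates.size()); // 1, as in the log
        // With a single candidate window there is nothing to compare against; it is selected.
        long total = candidates.get(0).stream().mapToLong(Long::longValue).sum();
        System.out.println("Selected " + candidates.get(0).size() + " files, total size " + total);
    }
}
```

With only three eligible files and a minimum window of three, exactly one window exists, which is why the log reports "considering 1 permutations" and a selection of all files totalling 192982 bytes.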
2024-11-16T20:38:47,350 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/a7da3e473e984bdf8594bf36660d46fd, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/53241e3870f04076b4c28ffa53fbccd6, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/1a9f85328a4d43698d8a491be58200b1] into tmpdir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp, totalSize=188.5 K 2024-11-16T20:38:47,350 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting a7da3e473e984bdf8594bf36660d46fd, keycount=145, bloomtype=ROW, size=158.7 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1731789494809 2024-11-16T20:38:47,351 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting 53241e3870f04076b4c28ffa53fbccd6, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1731789525243 2024-11-16T20:38:47,351 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1a9f85328a4d43698d8a491be58200b1, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1731789525265 2024-11-16T20:38:47,353 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/de26b3dce0c34a0b8af30e5d15928ae9 is 1080, key is row0226/info:/1731789527323/Put/seqid=0 2024-11-16T20:38:47,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741873_1049 (size=17918) 2024-11-16T20:38:47,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741873_1049 (size=17918) 2024-11-16T20:38:47,358 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/de26b3dce0c34a0b8af30e5d15928ae9 2024-11-16T20:38:47,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/de26b3dce0c34a0b8af30e5d15928ae9 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/de26b3dce0c34a0b8af30e5d15928ae9 2024-11-16T20:38:47,365 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a3be2714e6da9eb925be22aead0c4a5#info#compaction#88 average throughput is 56.10 MB/second, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T20:38:47,366 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/8d0823c88f2e444081cbad57329c6699 is 1080, key is row0062/info:/1731789494809/Put/seqid=0 2024-11-16T20:38:47,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741874_1050 (size=183148) 2024-11-16T20:38:47,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741874_1050 (size=183148) 2024-11-16T20:38:47,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/de26b3dce0c34a0b8af30e5d15928ae9, entries=12, sequenceid=312, filesize=17.5 K 2024-11-16T20:38:47,370 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 0a3be2714e6da9eb925be22aead0c4a5 in 22ms, sequenceid=312, compaction requested=false 2024-11-16T20:38:47,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:47,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43411 {}] regionserver.HRegion(8855): Flush requested on 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:47,371 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0a3be2714e6da9eb925be22aead0c4a5 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T20:38:47,375 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/8d0823c88f2e444081cbad57329c6699 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8d0823c88f2e444081cbad57329c6699 2024-11-16T20:38:47,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/01d8aaad638441d48d6b03660364f9df is 1080, key is row0238/info:/1731789527350/Put/seqid=0 2024-11-16T20:38:47,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741875_1051 (size=16839) 2024-11-16T20:38:47,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741875_1051 (size=16839) 2024-11-16T20:38:47,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/01d8aaad638441d48d6b03660364f9df 
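Each flush in this log follows the same two-step pattern: DefaultStoreFlusher writes the new file under the region's .tmp directory, then HRegionFileSystem commits it by moving it into the info store directory, where StoreFlusherImpl adds it. Below is a minimal sketch of that write-then-rename commit idiom against the plain Hadoop FileSystem API; the local paths and file name are made up, and the real code goes through HRegionFileSystem and StoreFileWriter rather than raw FileSystem calls.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch of the write-to-.tmp-then-rename commit pattern visible in the
// flush entries above. Paths are invented; this is not the HRegionFileSystem code.
public class TmpThenCommitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // local FS unless fs.defaultFS points at HDFS
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile = new Path("/tmp/region-demo/.tmp/info/flush-output");
        Path committed = new Path("/tmp/region-demo/info/flush-output");

        // 1. Write the flushed data somewhere readers never look.
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.writeBytes("flushed cells would go here\n");
        }

        // 2. Commit by renaming into the store directory; readers only ever see complete files.
        fs.mkdirs(committed.getParent());
        boolean ok = fs.rename(tmpFile, committed);
        System.out.println("commit via rename succeeded: " + ok);
    }
}
```

The point of the pattern is that a half-written HFile is never visible in the store directory; scans only see files that arrive atomically via the rename.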
2024-11-16T20:38:47,382 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0a3be2714e6da9eb925be22aead0c4a5/info of 0a3be2714e6da9eb925be22aead0c4a5 into 8d0823c88f2e444081cbad57329c6699(size=178.9 K), total size for store is 196.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T20:38:47,382 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:47,382 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5., storeName=0a3be2714e6da9eb925be22aead0c4a5/info, priority=13, startTime=1731789527348; duration=0sec 2024-11-16T20:38:47,383 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:47,383 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a3be2714e6da9eb925be22aead0c4a5:info 2024-11-16T20:38:47,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/01d8aaad638441d48d6b03660364f9df as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/01d8aaad638441d48d6b03660364f9df 2024-11-16T20:38:47,392 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/01d8aaad638441d48d6b03660364f9df, entries=11, sequenceid=326, filesize=16.4 K 2024-11-16T20:38:47,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=8.41 KB/8608 for 0a3be2714e6da9eb925be22aead0c4a5 in 22ms, sequenceid=326, compaction requested=true 2024-11-16T20:38:47,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:47,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a3be2714e6da9eb925be22aead0c4a5:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T20:38:47,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:47,394 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T20:38:47,395 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 217905 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T20:38:47,395 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1541): 
0a3be2714e6da9eb925be22aead0c4a5/info is initiating minor compaction (all files) 2024-11-16T20:38:47,395 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0a3be2714e6da9eb925be22aead0c4a5/info in TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. 2024-11-16T20:38:47,395 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8d0823c88f2e444081cbad57329c6699, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/de26b3dce0c34a0b8af30e5d15928ae9, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/01d8aaad638441d48d6b03660364f9df] into tmpdir=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp, totalSize=212.8 K 2024-11-16T20:38:47,395 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8d0823c88f2e444081cbad57329c6699, keycount=164, bloomtype=ROW, size=178.9 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1731789494809 2024-11-16T20:38:47,396 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting de26b3dce0c34a0b8af30e5d15928ae9, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1731789527323 2024-11-16T20:38:47,396 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] compactions.Compactor(225): Compacting 01d8aaad638441d48d6b03660364f9df, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1731789527350 2024-11-16T20:38:47,407 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a3be2714e6da9eb925be22aead0c4a5#info#compaction#90 average throughput is 47.97 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T20:38:47,408 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/e8b912252bcc4463af4d69776aeca093 is 1080, key is row0062/info:/1731789494809/Put/seqid=0 2024-11-16T20:38:47,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741876_1052 (size=208144) 2024-11-16T20:38:47,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741876_1052 (size=208144) 2024-11-16T20:38:47,414 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/e8b912252bcc4463af4d69776aeca093 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/e8b912252bcc4463af4d69776aeca093 2024-11-16T20:38:47,419 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0a3be2714e6da9eb925be22aead0c4a5/info of 0a3be2714e6da9eb925be22aead0c4a5 into e8b912252bcc4463af4d69776aeca093(size=203.3 K), total size for store is 203.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T20:38:47,419 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:47,419 INFO [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5., storeName=0a3be2714e6da9eb925be22aead0c4a5/info, priority=13, startTime=1731789527394; duration=0sec 2024-11-16T20:38:47,419 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T20:38:47,419 DEBUG [RS:0;40c018648b21:43411-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a3be2714e6da9eb925be22aead0c4a5:info 2024-11-16T20:38:47,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:47,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:48,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:48,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:49,387 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-16T20:38:49,388 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C43411%2C1731789471222.1731789529387 2024-11-16T20:38:49,412 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,412 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,412 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,413 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,413 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,413 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/WALs/40c018648b21,43411,1731789471222/40c018648b21%2C43411%2C1731789471222.1731789471855 with entries=315, filesize=309.56 KB; new WAL /user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/WALs/40c018648b21,43411,1731789471222/40c018648b21%2C43411%2C1731789471222.1731789529387 2024-11-16T20:38:49,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741833_1009 (size=316995) 2024-11-16T20:38:49,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741833_1009 (size=316995) 2024-11-16T20:38:49,420 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41755:41755),(127.0.0.1/127.0.0.1:35917:35917)] 2024-11-16T20:38:49,423 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for f1a3a111b221e74628f64224b1aaf85e: 2024-11-16T20:38:49,423 INFO [Time-limited 
test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-16T20:38:49,428 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/.tmp/info/2f69586de69c46ff9f4f1d7e94b4666b is 186, key is TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e./info:regioninfo/1731789498046/Put/seqid=0 2024-11-16T20:38:49,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741878_1054 (size=6153) 2024-11-16T20:38:49,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741878_1054 (size=6153) 2024-11-16T20:38:49,432 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/.tmp/info/2f69586de69c46ff9f4f1d7e94b4666b 2024-11-16T20:38:49,437 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/.tmp/info/2f69586de69c46ff9f4f1d7e94b4666b as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/info/2f69586de69c46ff9f4f1d7e94b4666b 2024-11-16T20:38:49,441 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/info/2f69586de69c46ff9f4f1d7e94b4666b, entries=5, sequenceid=21, filesize=6.0 K 2024-11-16T20:38:49,442 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 19ms, sequenceid=21, compaction requested=false 2024-11-16T20:38:49,442 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T20:38:49,442 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 0a3be2714e6da9eb925be22aead0c4a5 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-16T20:38:49,446 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/48566f5adae54877b7d74acf464cd91f is 1080, key is row0249/info:/1731789527372/Put/seqid=0 2024-11-16T20:38:49,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741879_1055 (size=13602) 2024-11-16T20:38:49,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741879_1055 (size=13602) 2024-11-16T20:38:49,450 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/48566f5adae54877b7d74acf464cd91f 2024-11-16T20:38:49,454 DEBUG [Time-limited test {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/.tmp/info/48566f5adae54877b7d74acf464cd91f as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/48566f5adae54877b7d74acf464cd91f 2024-11-16T20:38:49,458 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/48566f5adae54877b7d74acf464cd91f, entries=8, sequenceid=339, filesize=13.3 K 2024-11-16T20:38:49,459 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 0a3be2714e6da9eb925be22aead0c4a5 in 17ms, sequenceid=339, compaction requested=false 2024-11-16T20:38:49,459 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 0a3be2714e6da9eb925be22aead0c4a5: 2024-11-16T20:38:49,459 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C43411%2C1731789471222.1731789529459 2024-11-16T20:38:49,464 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,464 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,464 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,464 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,464 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,464 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/WALs/40c018648b21,43411,1731789471222/40c018648b21%2C43411%2C1731789471222.1731789529387 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/WALs/40c018648b21,43411,1731789471222/40c018648b21%2C43411%2C1731789471222.1731789529459 2024-11-16T20:38:49,465 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35917:35917),(127.0.0.1/127.0.0.1:41755:41755)] 2024-11-16T20:38:49,465 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/WALs/40c018648b21,43411,1731789471222/40c018648b21%2C43411%2C1731789471222.1731789529387 is not closed yet, will try archiving it next time 2024-11-16T20:38:49,465 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/WALs/40c018648b21,43411,1731789471222/40c018648b21%2C43411%2C1731789471222.1731789471855 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/oldWALs/40c018648b21%2C43411%2C1731789471222.1731789471855 2024-11-16T20:38:49,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741877_1053 (size=731) 2024-11-16T20:38:49,465 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T20:38:49,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741877_1053 (size=731) 
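This is the behaviour the test is actually exercising: the WAL is rolled to a new timestamped file, writers are redirected to it, and the previous file is archived to oldWALs once it is no longer needed. Ignoring HDFS pipelines and sync runners, the roll-then-archive idea looks roughly like the following sketch; the thresholds, directory names and file-name scheme are invented, and the real FSHLog/AbstractFSWAL logic is considerably more involved.

```java
import java.io.IOException;
import java.io.PrintWriter;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Rough sketch of roll-then-archive for a write-ahead log, on the local filesystem.
// Thresholds and names are assumptions for the demo, not HBase's real WAL code.
public class WalRollSketch {
    private final Path walDir;
    private final Path archiveDir;
    private Path current;
    private PrintWriter writer;
    private long entries;

    WalRollSketch(Path walDir, Path archiveDir) throws IOException {
        this.walDir = Files.createDirectories(walDir);
        this.archiveDir = Files.createDirectories(archiveDir);
        roll();
    }

    void append(String record) throws IOException {
        writer.println(record);
        if (++entries >= 100) {   // roll after 100 entries (arbitrary threshold)
            roll();
        }
    }

    private void roll() throws IOException {
        Path previous = current;
        if (writer != null) {
            writer.close();
        }
        current = walDir.resolve("wal." + System.nanoTime()); // timestamp-like suffix
        writer = new PrintWriter(Files.newBufferedWriter(current));
        entries = 0;
        if (previous != null) {
            // Once the old file is closed it can be moved to the archive directory.
            Files.move(previous, archiveDir.resolve(previous.getFileName()),
                    StandardCopyOption.REPLACE_EXISTING);
        }
    }

    public static void main(String[] args) throws IOException {
        WalRollSketch wal = new WalRollSketch(Path.of("/tmp/wal-demo/WALs"),
                Path.of("/tmp/wal-demo/oldWALs"));
        for (int i = 0; i < 250; i++) {
            wal.append("edit-" + i);
        }
        wal.writer.close();
    }
}
```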
2024-11-16T20:38:49,466 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/WALs/40c018648b21,43411,1731789471222/40c018648b21%2C43411%2C1731789471222.1731789529387 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/oldWALs/40c018648b21%2C43411%2C1731789471222.1731789529387 2024-11-16T20:38:49,566 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T20:38:49,566 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T20:38:49,566 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:38:49,566 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:38:49,566 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:38:49,566 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T20:38:49,566 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T20:38:49,566 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2075433954, stopped=false 2024-11-16T20:38:49,566 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=40c018648b21,44513,1731789471042 2024-11-16T20:38:49,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T20:38:49,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T20:38:49,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:49,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:49,636 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T20:38:49,636 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
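AsyncConnectionImpl prints the full call stack of whoever closed it, which is why the tearDown path through HBaseTestingUtil.shutdownMiniCluster is visible in the entry above. The underlying trick is simply Thread.currentThread().getStackTrace(); a tiny example of the same diagnostic pattern follows, with an invented class name.

```java
// Tiny sketch of logging the caller's stack on close(), as AsyncConnectionImpl does
// in the entries above. The class and method names here are invented for the demo.
public class ClosableWithStackLogging implements AutoCloseable {

    @Override
    public void close() {
        StringBuilder sb = new StringBuilder("Connection has been closed. Call stack:");
        // Element 0 is getStackTrace itself; start at 1 to show close() and its callers.
        StackTraceElement[] stack = Thread.currentThread().getStackTrace();
        for (int i = 1; i < stack.length; i++) {
            sb.append("\n  at ").append(stack[i]);
        }
        System.out.println(sb);
    }

    public static void main(String[] args) {
        try (ClosableWithStackLogging conn = new ClosableWithStackLogging()) {
            System.out.println("doing work before close");
        }
    }
}
```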
2024-11-16T20:38:49,636 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:38:49,636 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:38:49,636 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:38:49,636 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:38:49,636 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '40c018648b21,43411,1731789471222' ***** 2024-11-16T20:38:49,636 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T20:38:49,637 INFO [RS:0;40c018648b21:43411 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T20:38:49,637 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T20:38:49,637 INFO [RS:0;40c018648b21:43411 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T20:38:49,637 INFO [RS:0;40c018648b21:43411 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T20:38:49,637 INFO [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer(3091): Received CLOSE for f1a3a111b221e74628f64224b1aaf85e 2024-11-16T20:38:49,637 INFO [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer(3091): Received CLOSE for 0a3be2714e6da9eb925be22aead0c4a5 2024-11-16T20:38:49,637 INFO [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer(959): stopping server 40c018648b21,43411,1731789471222 2024-11-16T20:38:49,637 INFO [RS:0;40c018648b21:43411 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T20:38:49,637 INFO [RS:0;40c018648b21:43411 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;40c018648b21:43411. 2024-11-16T20:38:49,637 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f1a3a111b221e74628f64224b1aaf85e, disabling compactions & flushes 2024-11-16T20:38:49,637 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e. 
2024-11-16T20:38:49,637 DEBUG [RS:0;40c018648b21:43411 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:38:49,637 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e. 2024-11-16T20:38:49,637 DEBUG [RS:0;40c018648b21:43411 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:38:49,637 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e. after waiting 0 ms 2024-11-16T20:38:49,637 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e. 2024-11-16T20:38:49,637 INFO [RS:0;40c018648b21:43411 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T20:38:49,637 INFO [RS:0;40c018648b21:43411 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T20:38:49,638 INFO [RS:0;40c018648b21:43411 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-16T20:38:49,638 INFO [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T20:38:49,638 INFO [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-16T20:38:49,638 DEBUG [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer(1325): Online Regions={f1a3a111b221e74628f64224b1aaf85e=TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e., 1588230740=hbase:meta,,1.1588230740, 0a3be2714e6da9eb925be22aead0c4a5=TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.} 2024-11-16T20:38:49,638 DEBUG [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer(1351): Waiting on 0a3be2714e6da9eb925be22aead0c4a5, 1588230740, f1a3a111b221e74628f64224b1aaf85e 2024-11-16T20:38:49,638 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T20:38:49,638 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T20:38:49,638 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T20:38:49,638 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/f1a3a111b221e74628f64224b1aaf85e/info/adf6f2f98ccf48c4beb848e39355383d.ec472999ca7ae234bff5f83872160c17->hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/adf6f2f98ccf48c4beb848e39355383d-bottom] to archive 2024-11-16T20:38:49,638 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T20:38:49,638 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T20:38:49,639 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-16T20:38:49,640 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/f1a3a111b221e74628f64224b1aaf85e/info/adf6f2f98ccf48c4beb848e39355383d.ec472999ca7ae234bff5f83872160c17 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/f1a3a111b221e74628f64224b1aaf85e/info/adf6f2f98ccf48c4beb848e39355383d.ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:49,640 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=40c018648b21:44513 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-16T20:38:49,641 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-16T20:38:49,644 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/f1a3a111b221e74628f64224b1aaf85e/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-16T20:38:49,644 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-16T20:38:49,645 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e. 2024-11-16T20:38:49,645 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f1a3a111b221e74628f64224b1aaf85e: Waiting for close lock at 1731789529637Running coprocessor pre-close hooks at 1731789529637Disabling compacts and flushes for region at 1731789529637Disabling writes for close at 1731789529637Writing region close event to WAL at 1731789529641 (+4 ms)Running coprocessor post-close hooks at 1731789529645 (+4 ms)Closed at 1731789529645 2024-11-16T20:38:49,645 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T20:38:49,645 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T20:38:49,645 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731789497315.f1a3a111b221e74628f64224b1aaf85e. 
2024-11-16T20:38:49,645 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731789529638Running coprocessor pre-close hooks at 1731789529638Disabling compacts and flushes for region at 1731789529638Disabling writes for close at 1731789529638Writing region close event to WAL at 1731789529642 (+4 ms)Running coprocessor post-close hooks at 1731789529645 (+3 ms)Closed at 1731789529645 2024-11-16T20:38:49,645 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0a3be2714e6da9eb925be22aead0c4a5, disabling compactions & flushes 2024-11-16T20:38:49,645 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T20:38:49,645 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. 2024-11-16T20:38:49,645 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. 2024-11-16T20:38:49,645 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. after waiting 0 ms 2024-11-16T20:38:49,645 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. 
2024-11-16T20:38:49,646 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/adf6f2f98ccf48c4beb848e39355383d.ec472999ca7ae234bff5f83872160c17->hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/ec472999ca7ae234bff5f83872160c17/info/adf6f2f98ccf48c4beb848e39355383d-top, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-f07bc69420ab4bf49fec50aaa07fddc5, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-f40e67c765794f5c9aac797adf80964b, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/b8a82a341a6d4e55875487a371e6ba39, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-61f4ea80bf044926a61a65d26772c65d, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/c8f8857fe7bf46d8bc7d60b0362faded, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8eded2fc4d8c46779f5b9ca76d5bfe83, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/78c981b06f5b41679837b947f241c31f, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/f561bc04a953458fa7ac4e2a08fc00a9, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/e044e11d3b8e4b6cb92ba3e08dc41eeb, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/7626fbc2ce724419b8e323e84f2a63db, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8fa426acc999460a9b4173c7bc90a992, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8b860031812d4bdeb18d019d27a81da2, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8e7ce8b9f86e47e0b4f465f8eac40153, 
hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/a6cd91023ce648099615a0e4bc3e88f2, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/702ab19114174e9cbe6fa6145d904fcd, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/9d779b5601e1428d86cdcbcec2bbbf08, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/c95ea814b4e44804b0cec432b55e57a6, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/a7da3e473e984bdf8594bf36660d46fd, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/6d6dad7a1c5a4cb59aca9cc6ddc90ad0, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/53241e3870f04076b4c28ffa53fbccd6, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8d0823c88f2e444081cbad57329c6699, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/1a9f85328a4d43698d8a491be58200b1, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/de26b3dce0c34a0b8af30e5d15928ae9, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/01d8aaad638441d48d6b03660364f9df] to archive 2024-11-16T20:38:49,647 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
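Note: every archival move in the HFileArchiver entries that follow maps a store file under .../data/... to the same relative path under .../archive/data/.... A small illustrative helper (not HBase's HFileArchiver itself) that reproduces the mapping seen in these lines:

    // Illustrative only: rootDir stands for the cluster root directory, e.g.
    // hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985.
    static String toArchivePath(String rootDir, String storeFilePath) {
      String dataPrefix = rootDir + "/data/";
      if (!storeFilePath.startsWith(dataPrefix)) {
        throw new IllegalArgumentException("not a store file under " + dataPrefix);
      }
      // The table/region/family/file layout is preserved; only the prefix changes.
      return rootDir + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
    }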
2024-11-16T20:38:49,648 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/adf6f2f98ccf48c4beb848e39355383d.ec472999ca7ae234bff5f83872160c17 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/adf6f2f98ccf48c4beb848e39355383d.ec472999ca7ae234bff5f83872160c17 2024-11-16T20:38:49,649 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-f07bc69420ab4bf49fec50aaa07fddc5 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-f07bc69420ab4bf49fec50aaa07fddc5 2024-11-16T20:38:49,650 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-f40e67c765794f5c9aac797adf80964b to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-f40e67c765794f5c9aac797adf80964b 2024-11-16T20:38:49,651 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/b8a82a341a6d4e55875487a371e6ba39 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/b8a82a341a6d4e55875487a371e6ba39 2024-11-16T20:38:49,653 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-61f4ea80bf044926a61a65d26772c65d to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/TestLogRolling-testLogRolling=ec472999ca7ae234bff5f83872160c17-61f4ea80bf044926a61a65d26772c65d 2024-11-16T20:38:49,654 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/c8f8857fe7bf46d8bc7d60b0362faded to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/c8f8857fe7bf46d8bc7d60b0362faded 2024-11-16T20:38:49,655 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8eded2fc4d8c46779f5b9ca76d5bfe83 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8eded2fc4d8c46779f5b9ca76d5bfe83 2024-11-16T20:38:49,656 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/78c981b06f5b41679837b947f241c31f to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/78c981b06f5b41679837b947f241c31f 2024-11-16T20:38:49,657 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/f561bc04a953458fa7ac4e2a08fc00a9 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/f561bc04a953458fa7ac4e2a08fc00a9 2024-11-16T20:38:49,658 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/e044e11d3b8e4b6cb92ba3e08dc41eeb to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/e044e11d3b8e4b6cb92ba3e08dc41eeb 2024-11-16T20:38:49,659 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/7626fbc2ce724419b8e323e84f2a63db to 
hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/7626fbc2ce724419b8e323e84f2a63db 2024-11-16T20:38:49,660 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8fa426acc999460a9b4173c7bc90a992 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8fa426acc999460a9b4173c7bc90a992 2024-11-16T20:38:49,660 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8b860031812d4bdeb18d019d27a81da2 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8b860031812d4bdeb18d019d27a81da2 2024-11-16T20:38:49,661 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8e7ce8b9f86e47e0b4f465f8eac40153 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8e7ce8b9f86e47e0b4f465f8eac40153 2024-11-16T20:38:49,662 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/a6cd91023ce648099615a0e4bc3e88f2 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/a6cd91023ce648099615a0e4bc3e88f2 2024-11-16T20:38:49,663 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/702ab19114174e9cbe6fa6145d904fcd to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/702ab19114174e9cbe6fa6145d904fcd 2024-11-16T20:38:49,664 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/9d779b5601e1428d86cdcbcec2bbbf08 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/9d779b5601e1428d86cdcbcec2bbbf08 2024-11-16T20:38:49,664 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/c95ea814b4e44804b0cec432b55e57a6 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/c95ea814b4e44804b0cec432b55e57a6 2024-11-16T20:38:49,665 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/a7da3e473e984bdf8594bf36660d46fd to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/a7da3e473e984bdf8594bf36660d46fd 2024-11-16T20:38:49,666 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/6d6dad7a1c5a4cb59aca9cc6ddc90ad0 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/6d6dad7a1c5a4cb59aca9cc6ddc90ad0 2024-11-16T20:38:49,667 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/53241e3870f04076b4c28ffa53fbccd6 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/53241e3870f04076b4c28ffa53fbccd6 2024-11-16T20:38:49,668 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8d0823c88f2e444081cbad57329c6699 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/8d0823c88f2e444081cbad57329c6699 2024-11-16T20:38:49,668 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/1a9f85328a4d43698d8a491be58200b1 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/1a9f85328a4d43698d8a491be58200b1 2024-11-16T20:38:49,669 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/de26b3dce0c34a0b8af30e5d15928ae9 to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/de26b3dce0c34a0b8af30e5d15928ae9 2024-11-16T20:38:49,670 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/01d8aaad638441d48d6b03660364f9df to hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/archive/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/info/01d8aaad638441d48d6b03660364f9df 2024-11-16T20:38:49,670 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [b8a82a341a6d4e55875487a371e6ba39=43081, c8f8857fe7bf46d8bc7d60b0362faded=12516, 8eded2fc4d8c46779f5b9ca76d5bfe83=63733, 78c981b06f5b41679837b947f241c31f=17906, f561bc04a953458fa7ac4e2a08fc00a9=17906, e044e11d3b8e4b6cb92ba3e08dc41eeb=84390, 7626fbc2ce724419b8e323e84f2a63db=12516, 8fa426acc999460a9b4173c7bc90a992=29784, 8b860031812d4bdeb18d019d27a81da2=116840, 8e7ce8b9f86e47e0b4f465f8eac40153=12516, a6cd91023ce648099615a0e4bc3e88f2=16828, 702ab19114174e9cbe6fa6145d904fcd=143019, 9d779b5601e1428d86cdcbcec2bbbf08=19000, c95ea814b4e44804b0cec432b55e57a6=12517, a7da3e473e984bdf8594bf36660d46fd=162541, 6d6dad7a1c5a4cb59aca9cc6ddc90ad0=16839, 53241e3870f04076b4c28ffa53fbccd6=17918, 8d0823c88f2e444081cbad57329c6699=183148, 1a9f85328a4d43698d8a491be58200b1=12523, de26b3dce0c34a0b8af30e5d15928ae9=17918, 01d8aaad638441d48d6b03660364f9df=16839] 2024-11-16T20:38:49,673 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/data/default/TestLogRolling-testLogRolling/0a3be2714e6da9eb925be22aead0c4a5/recovered.edits/342.seqid, newMaxSeqId=342, maxSeqId=126 2024-11-16T20:38:49,674 INFO [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. 
2024-11-16T20:38:49,674 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0a3be2714e6da9eb925be22aead0c4a5: Waiting for close lock at 1731789529645Running coprocessor pre-close hooks at 1731789529645Disabling compacts and flushes for region at 1731789529645Disabling writes for close at 1731789529645Writing region close event to WAL at 1731789529670 (+25 ms)Running coprocessor post-close hooks at 1731789529674 (+4 ms)Closed at 1731789529674 2024-11-16T20:38:49,674 DEBUG [RS_CLOSE_REGION-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731789497315.0a3be2714e6da9eb925be22aead0c4a5. 2024-11-16T20:38:49,712 INFO [regionserver/40c018648b21:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T20:38:49,713 INFO [regionserver/40c018648b21:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T20:38:49,716 INFO [regionserver/40c018648b21:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T20:38:49,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:38:49,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:49,838 INFO [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer(976): stopping server 40c018648b21,43411,1731789471222; all regions closed. 
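Note: both RecoverLeaseFSUtils warnings above bottom out in java.io.IOException: Filesystem closed, which HDFS client calls raise once their FileSystem has already been closed. A minimal illustrative reproduction, assuming fs.defaultFS points at a running HDFS instance:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FilesystemClosedExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // assumes fs.defaultFS = hdfs://...
        FileSystem fs = FileSystem.get(conf);
        fs.close();
        // Any later call fails the client's checkOpen() precondition, just like the
        // isFileClosed() invocations retried in the warnings above.
        fs.exists(new Path("/tmp"));
      }
    }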
2024-11-16T20:38:49,839 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,839 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,839 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,839 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,839 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741834_1010 (size=8107) 2024-11-16T20:38:49,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741834_1010 (size=8107) 2024-11-16T20:38:49,847 DEBUG [RS:0;40c018648b21:43411 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/oldWALs 2024-11-16T20:38:49,847 INFO [RS:0;40c018648b21:43411 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 40c018648b21%2C43411%2C1731789471222.meta:.meta(num 1731789472452) 2024-11-16T20:38:49,848 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,848 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,848 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,849 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,849 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:49,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741880_1056 (size=780) 2024-11-16T20:38:49,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741880_1056 (size=780) 2024-11-16T20:38:49,853 DEBUG [RS:0;40c018648b21:43411 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/oldWALs 2024-11-16T20:38:49,853 INFO [RS:0;40c018648b21:43411 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 40c018648b21%2C43411%2C1731789471222:(num 1731789529459) 2024-11-16T20:38:49,853 DEBUG [RS:0;40c018648b21:43411 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:38:49,853 INFO [RS:0;40c018648b21:43411 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T20:38:49,853 INFO [RS:0;40c018648b21:43411 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T20:38:49,853 INFO [RS:0;40c018648b21:43411 {}] hbase.ChoreService(370): Chore service for: regionserver/40c018648b21:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T20:38:49,853 INFO [RS:0;40c018648b21:43411 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T20:38:49,853 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
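Note: the WAL names closed and moved to oldWALs above embed the owning server as host,port,startcode with the commas percent-encoded, followed by the roll timestamp. A tiny illustrative decode of that prefix:

    import java.net.URLDecoder;
    import java.nio.charset.StandardCharsets;

    public class WalNameDecode {
      public static void main(String[] args) {
        // Prefix taken from the WAL name logged above:
        // 40c018648b21%2C43411%2C1731789471222.meta:.meta(num 1731789472452)
        String prefix = "40c018648b21%2C43411%2C1731789471222";
        // Prints "40c018648b21,43411,1731789471222": host, RPC port, server start code.
        System.out.println(URLDecoder.decode(prefix, StandardCharsets.UTF_8));
      }
    }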
2024-11-16T20:38:49,853 INFO [RS:0;40c018648b21:43411 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43411 2024-11-16T20:38:49,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/40c018648b21,43411,1731789471222 2024-11-16T20:38:49,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T20:38:49,941 INFO [RS:0;40c018648b21:43411 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T20:38:49,952 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [40c018648b21,43411,1731789471222] 2024-11-16T20:38:49,962 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/40c018648b21,43411,1731789471222 already deleted, retry=false 2024-11-16T20:38:49,962 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 40c018648b21,43411,1731789471222 expired; onlineServers=0 2024-11-16T20:38:49,962 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '40c018648b21,44513,1731789471042' ***** 2024-11-16T20:38:49,962 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T20:38:49,962 INFO [M:0;40c018648b21:44513 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T20:38:49,962 INFO [M:0;40c018648b21:44513 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T20:38:49,962 DEBUG [M:0;40c018648b21:44513 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T20:38:49,962 DEBUG [M:0;40c018648b21:44513 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T20:38:49,962 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
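Note: the expiration processed above is driven by the ephemeral znode /hbase/rs/40c018648b21,43411,1731789471222 disappearing when the region server closes its ZooKeeper session. An illustrative watcher sketch (not the RegionServerTracker implementation); the quorum address is the one shown in this log:

    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsLivenessWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56747", 30000, event -> { });
        // Watch the parent; removing an ephemeral child fires NodeChildrenChanged,
        // which the master interprets as that region server expiring.
        zk.getChildren("/hbase/rs", event -> {
          if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
            System.out.println("region server set changed under " + event.getPath());
          }
        });
        Thread.sleep(60_000); // keep the session open long enough to observe the event
      }
    }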
2024-11-16T20:38:49,962 DEBUG [master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789471584 {}] cleaner.HFileCleaner(306): Exit Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789471584,5,FailOnTimeoutGroup] 2024-11-16T20:38:49,962 DEBUG [master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789471588 {}] cleaner.HFileCleaner(306): Exit Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789471588,5,FailOnTimeoutGroup] 2024-11-16T20:38:49,963 INFO [M:0;40c018648b21:44513 {}] hbase.ChoreService(370): Chore service for: master/40c018648b21:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T20:38:49,963 INFO [M:0;40c018648b21:44513 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T20:38:49,963 DEBUG [M:0;40c018648b21:44513 {}] master.HMaster(1795): Stopping service threads 2024-11-16T20:38:49,963 INFO [M:0;40c018648b21:44513 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T20:38:49,963 INFO [M:0;40c018648b21:44513 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T20:38:49,963 INFO [M:0;40c018648b21:44513 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T20:38:49,964 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T20:38:49,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T20:38:49,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:49,972 DEBUG [M:0;40c018648b21:44513 {}] zookeeper.ZKUtil(347): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T20:38:49,972 WARN [M:0;40c018648b21:44513 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T20:38:49,973 INFO [M:0;40c018648b21:44513 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/.lastflushedseqids 2024-11-16T20:38:49,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741881_1057 (size=228) 2024-11-16T20:38:49,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741881_1057 (size=228) 2024-11-16T20:38:49,978 INFO [M:0;40c018648b21:44513 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T20:38:49,978 INFO [M:0;40c018648b21:44513 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T20:38:49,979 DEBUG [M:0;40c018648b21:44513 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T20:38:49,979 INFO [M:0;40c018648b21:44513 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:38:49,979 DEBUG [M:0;40c018648b21:44513 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:38:49,979 DEBUG [M:0;40c018648b21:44513 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T20:38:49,979 DEBUG [M:0;40c018648b21:44513 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:38:49,979 INFO [M:0;40c018648b21:44513 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.36 KB 2024-11-16T20:38:50,000 DEBUG [M:0;40c018648b21:44513 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d3b960faa05f4e96baa01fc1a8fdcbaa is 82, key is hbase:meta,,1/info:regioninfo/1731789472478/Put/seqid=0 2024-11-16T20:38:50,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741882_1058 (size=5672) 2024-11-16T20:38:50,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741882_1058 (size=5672) 2024-11-16T20:38:50,005 INFO [M:0;40c018648b21:44513 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d3b960faa05f4e96baa01fc1a8fdcbaa 2024-11-16T20:38:50,022 DEBUG [M:0;40c018648b21:44513 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0a17b3cbef774810bbc985b6667fc39e is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731789472938/Put/seqid=0 2024-11-16T20:38:50,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741883_1059 (size=7091) 2024-11-16T20:38:50,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741883_1059 (size=7091) 2024-11-16T20:38:50,027 INFO [M:0;40c018648b21:44513 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0a17b3cbef774810bbc985b6667fc39e 2024-11-16T20:38:50,030 INFO [M:0;40c018648b21:44513 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0a17b3cbef774810bbc985b6667fc39e 2024-11-16T20:38:50,044 DEBUG [M:0;40c018648b21:44513 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a16ec3195804155949915bb9329b0e8 is 69, key is 40c018648b21,43411,1731789471222/rs:state/1731789471692/Put/seqid=0 
2024-11-16T20:38:50,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741884_1060 (size=5156) 2024-11-16T20:38:50,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741884_1060 (size=5156) 2024-11-16T20:38:50,049 INFO [M:0;40c018648b21:44513 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a16ec3195804155949915bb9329b0e8 2024-11-16T20:38:50,052 INFO [RS:0;40c018648b21:43411 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T20:38:50,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:38:50,052 INFO [RS:0;40c018648b21:43411 {}] regionserver.HRegionServer(1031): Exiting; stopping=40c018648b21,43411,1731789471222; zookeeper connection closed. 2024-11-16T20:38:50,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x101455e38dc0001, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:38:50,052 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@515d6560 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@515d6560 2024-11-16T20:38:50,052 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T20:38:50,065 DEBUG [M:0;40c018648b21:44513 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/42d69dec1ada405db5f8ce71406b9ac3 is 52, key is load_balancer_on/state:d/1731789472558/Put/seqid=0 2024-11-16T20:38:50,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741885_1061 (size=5056) 2024-11-16T20:38:50,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741885_1061 (size=5056) 2024-11-16T20:38:50,070 INFO [M:0;40c018648b21:44513 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/42d69dec1ada405db5f8ce71406b9ac3 2024-11-16T20:38:50,075 DEBUG [M:0;40c018648b21:44513 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d3b960faa05f4e96baa01fc1a8fdcbaa as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d3b960faa05f4e96baa01fc1a8fdcbaa 2024-11-16T20:38:50,079 INFO [M:0;40c018648b21:44513 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d3b960faa05f4e96baa01fc1a8fdcbaa, entries=8, sequenceid=125, filesize=5.5 K 2024-11-16T20:38:50,080 DEBUG [M:0;40c018648b21:44513 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0a17b3cbef774810bbc985b6667fc39e as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0a17b3cbef774810bbc985b6667fc39e 2024-11-16T20:38:50,085 INFO [M:0;40c018648b21:44513 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0a17b3cbef774810bbc985b6667fc39e 2024-11-16T20:38:50,085 INFO [M:0;40c018648b21:44513 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0a17b3cbef774810bbc985b6667fc39e, entries=13, sequenceid=125, filesize=6.9 K 2024-11-16T20:38:50,086 DEBUG [M:0;40c018648b21:44513 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a16ec3195804155949915bb9329b0e8 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5a16ec3195804155949915bb9329b0e8 2024-11-16T20:38:50,090 INFO [M:0;40c018648b21:44513 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5a16ec3195804155949915bb9329b0e8, entries=1, sequenceid=125, filesize=5.0 K 2024-11-16T20:38:50,091 DEBUG [M:0;40c018648b21:44513 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/42d69dec1ada405db5f8ce71406b9ac3 as hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/42d69dec1ada405db5f8ce71406b9ac3 2024-11-16T20:38:50,094 INFO [M:0;40c018648b21:44513 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43583/user/jenkins/test-data/cb77d17d-680a-7452-b2d5-1ce9ae75f985/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/42d69dec1ada405db5f8ce71406b9ac3, entries=1, sequenceid=125, filesize=4.9 K 2024-11-16T20:38:50,095 INFO [M:0;40c018648b21:44513 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=125, compaction requested=false 2024-11-16T20:38:50,097 INFO [M:0;40c018648b21:44513 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
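[annotation] The HRegionFileSystem "Committing ... .tmp/... as ..." lines above show the flushed HFiles being written under the region's .tmp directory first and only then moved into the column-family directory. Below is a minimal sketch of that write-to-temp-then-rename pattern using java.nio.file on a local filesystem; the class name TmpThenCommit and the paths are illustrative, and HBase itself does this through its own HRegionFileSystem/HDFS APIs rather than this code.

import java.io.IOException;
import java.nio.file.*;

// Illustrative only: write new data into a ".tmp" sibling, then move it into the
// final directory in one rename so readers never observe a half-written file.
public class TmpThenCommit {
    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("store");   // stands in for .../info
        Path tmpDir   = Files.createDirectories(storeDir.resolve(".tmp"));

        // 1. Flush: write the complete file under .tmp first.
        Path tmpFile = tmpDir.resolve("d3b960faa05f4e96baa01fc1a8fdcbaa");
        Files.write(tmpFile, "flushed cells go here".getBytes());

        // 2. Commit: move it into the store directory (atomic where the filesystem supports it).
        Path committed = storeDir.resolve(tmpFile.getFileName());
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);

        System.out.println("committed " + committed + " (" + Files.size(committed) + " bytes)");
    }
}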
2024-11-16T20:38:50,097 DEBUG [M:0;40c018648b21:44513 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731789529979Disabling compacts and flushes for region at 1731789529979Disabling writes for close at 1731789529979Obtaining lock to block concurrent updates at 1731789529979Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731789529979Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1731789529979Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731789529980 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731789529980Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731789530000 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731789530000Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731789530009 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731789530022 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731789530022Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731789530030 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731789530044 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731789530044Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731789530053 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731789530065 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731789530065Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24cfdb8: reopening flushed file at 1731789530074 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52e86140: reopening flushed file at 1731789530079 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38b840b: reopening flushed file at 1731789530085 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54bccce9: reopening flushed file at 1731789530090 (+5 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=125, compaction requested=false at 1731789530095 (+5 ms)Writing region close event to WAL at 1731789530097 (+2 ms)Closed at 1731789530097 2024-11-16T20:38:50,097 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:50,097 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:50,097 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:50,097 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:50,097 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:50,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741830_1006 (size=61332) 2024-11-16T20:38:50,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741830_1006 (size=61332) 2024-11-16T20:38:50,100 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
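[annotation] The "Region close journal" entry above records each step of the close/flush with absolute epoch-millisecond timestamps and "(+N ms)" deltas. The sketch below (plain Java; the class name CloseJournalDeltas is hypothetical, not an HBase utility) pulls the step names and deltas out of such a journal string to show where the time went; the sample fragments are copied from the journal above.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Illustrative parser for the "<step> at <epochMillis> (+N ms)" fragments that the
// region close journal concatenates; only steps with an explicit delta are printed.
public class CloseJournalDeltas {
    private static final Pattern STEP =
        Pattern.compile("([^)]+?) at (\\d{13}) \\(\\+(\\d+) ms\\)");

    public static void main(String[] args) {
        String journal =
            "Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731789530000 (+20 ms)" +
            "Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731789530009 (+9 ms)" +
            "Writing region close event to WAL at 1731789530097 (+2 ms)";
        Matcher m = STEP.matcher(journal);
        while (m.find()) {
            System.out.printf("%-70s +%s ms%n", m.group(1).trim(), m.group(3));
        }
    }
}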
2024-11-16T20:38:50,100 INFO [M:0;40c018648b21:44513 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T20:38:50,100 INFO [M:0;40c018648b21:44513 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44513 2024-11-16T20:38:50,100 INFO [M:0;40c018648b21:44513 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T20:38:50,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:38:50,209 INFO [M:0;40c018648b21:44513 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T20:38:50,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44513-0x101455e38dc0000, quorum=127.0.0.1:56747, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:38:50,214 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ce0a24{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:38:50,215 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47bcda8c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:38:50,215 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:38:50,215 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a5db76d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:38:50,216 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4437c7ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/hadoop.log.dir/,STOPPED} 2024-11-16T20:38:50,218 WARN [BP-1515412332-172.17.0.2-1731789468579 heartbeating to localhost/127.0.0.1:43583 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:38:50,218 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T20:38:50,218 WARN [BP-1515412332-172.17.0.2-1731789468579 heartbeating to localhost/127.0.0.1:43583 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1515412332-172.17.0.2-1731789468579 (Datanode Uuid b502e2d5-d5b4-4b23-aa3a-d3eef8615f41) service to localhost/127.0.0.1:43583 2024-11-16T20:38:50,218 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:38:50,218 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/cluster_f3190f36-3be0-ff43-3a58-878acd87bed2/data/data3/current/BP-1515412332-172.17.0.2-1731789468579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:38:50,219 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/cluster_f3190f36-3be0-ff43-3a58-878acd87bed2/data/data4/current/BP-1515412332-172.17.0.2-1731789468579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:38:50,219 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:38:50,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1204fb24{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:38:50,221 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@23e1642c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:38:50,221 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:38:50,222 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5551c062{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:38:50,222 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41b7d19a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/hadoop.log.dir/,STOPPED} 2024-11-16T20:38:50,223 WARN [BP-1515412332-172.17.0.2-1731789468579 heartbeating to localhost/127.0.0.1:43583 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:38:50,223 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
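[annotation] Several of the shutdown messages above (IncrementalBlockReportManager interrupted, "Ending command processor service", the refreshUsed threads reporting "sleep interrupted") show background workers treating thread interruption as their shutdown signal. Below is a minimal sketch of that general pattern in plain Java; the class name InterruptibleWorker is hypothetical and this is not the Hadoop code itself.

// Illustrative worker: sleeps between rounds of work and treats interrupt()
// as the shutdown signal, mirroring the "sleep interrupted ... ending service"
// messages in the datanode shutdown above.
public class InterruptibleWorker {
    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    // Stand-in for periodic work such as refreshing disk usage.
                    Thread.sleep(1_000);
                    System.out.println("refreshed");
                } catch (InterruptedException e) {
                    // Restore the flag so the loop condition sees it, then fall out.
                    Thread.currentThread().interrupt();
                    System.out.println("interrupted while sleeping; ending service");
                }
            }
            System.out.println("worker exiting");
        }, "refreshUsed-worker");

        worker.start();
        Thread.sleep(150);      // let it start
        worker.interrupt();     // ask it to shut down
        worker.join();
    }
}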
2024-11-16T20:38:50,223 WARN [BP-1515412332-172.17.0.2-1731789468579 heartbeating to localhost/127.0.0.1:43583 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1515412332-172.17.0.2-1731789468579 (Datanode Uuid b5d1c7e4-d556-47ae-b8c1-1a6d504ca25a) service to localhost/127.0.0.1:43583 2024-11-16T20:38:50,223 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:38:50,223 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/cluster_f3190f36-3be0-ff43-3a58-878acd87bed2/data/data1/current/BP-1515412332-172.17.0.2-1731789468579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:38:50,224 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/cluster_f3190f36-3be0-ff43-3a58-878acd87bed2/data/data2/current/BP-1515412332-172.17.0.2-1731789468579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:38:50,224 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:38:50,229 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43909889{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T20:38:50,229 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4ac7d52f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:38:50,229 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:38:50,229 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4881a2ed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:38:50,229 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45bda0cb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/hadoop.log.dir/,STOPPED} 2024-11-16T20:38:50,236 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T20:38:50,265 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T20:38:50,272 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=231 (was 208) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43583 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43583 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43583 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43583 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to 
localhost/127.0.0.1:43583 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43583 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43583 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:43583 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=519 (was 485) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=215 (was 185) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4569 (was 3779) - AvailableMemoryMB LEAK? 
- 2024-11-16T20:38:50,279 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=231, OpenFileDescriptor=519, MaxFileDescriptor=1048576, SystemLoadAverage=215, ProcessCount=11, AvailableMemoryMB=4569 2024-11-16T20:38:50,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T20:38:50,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/hadoop.log.dir so I do NOT create it in target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6 2024-11-16T20:38:50,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/072b365f-e388-eccd-93c2-d32092a81811/hadoop.tmp.dir so I do NOT create it in target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6 2024-11-16T20:38:50,279 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/cluster_05851c2a-d90d-4273-fcba-e46c749ce964, deleteOnExit=true 2024-11-16T20:38:50,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T20:38:50,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/test.cache.data in system properties and HBase conf 2024-11-16T20:38:50,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T20:38:50,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/hadoop.log.dir in system properties and HBase conf 2024-11-16T20:38:50,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T20:38:50,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T20:38:50,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T20:38:50,280 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T20:38:50,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T20:38:50,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T20:38:50,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T20:38:50,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T20:38:50,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T20:38:50,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T20:38:50,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T20:38:50,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T20:38:50,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T20:38:50,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/nfs.dump.dir in system properties and HBase conf 2024-11-16T20:38:50,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/java.io.tmpdir in system properties and HBase conf 2024-11-16T20:38:50,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T20:38:50,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T20:38:50,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T20:38:50,293 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T20:38:50,654 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:38:50,657 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:38:50,658 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:38:50,658 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:38:50,658 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:38:50,659 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:38:50,659 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3910812a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:38:50,659 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69a2ae1b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:38:50,749 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a7b167c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/java.io.tmpdir/jetty-localhost-44599-hadoop-hdfs-3_4_1-tests_jar-_-any-4310774243494616496/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T20:38:50,750 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@735ef7ff{HTTP/1.1, (http/1.1)}{localhost:44599} 2024-11-16T20:38:50,750 INFO [Time-limited test {}] server.Server(415): Started @312582ms 2024-11-16T20:38:50,761 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T20:38:50,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:50,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:51,006 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:38:51,008 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:38:51,008 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:38:51,008 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:38:51,008 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:38:51,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54008d53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:38:51,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ec1c28e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:38:51,102 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@392cf3b8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/java.io.tmpdir/jetty-localhost-34623-hadoop-hdfs-3_4_1-tests_jar-_-any-495881081936925675/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:38:51,102 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@56fc288a{HTTP/1.1, (http/1.1)}{localhost:34623} 2024-11-16T20:38:51,102 INFO [Time-limited test {}] server.Server(415): Started @312935ms 2024-11-16T20:38:51,103 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:38:51,131 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T20:38:51,133 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T20:38:51,134 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T20:38:51,134 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T20:38:51,134 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T20:38:51,135 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@fa9d1ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/hadoop.log.dir/,AVAILABLE} 2024-11-16T20:38:51,135 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@403020f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T20:38:51,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b3ba97d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/java.io.tmpdir/jetty-localhost-36269-hadoop-hdfs-3_4_1-tests_jar-_-any-5860733470406461509/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:38:51,227 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e0b3b7c{HTTP/1.1, (http/1.1)}{localhost:36269} 2024-11-16T20:38:51,227 INFO [Time-limited test {}] server.Server(415): Started @313059ms 2024-11-16T20:38:51,228 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T20:38:51,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:51,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:52,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,438 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,443 WARN [Thread-2506 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/cluster_05851c2a-d90d-4273-fcba-e46c749ce964/data/data2/current/BP-1614523203-172.17.0.2-1731789530296/current, will proceed with Du for space computation calculation, 2024-11-16T20:38:52,443 WARN [Thread-2505 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/cluster_05851c2a-d90d-4273-fcba-e46c749ce964/data/data1/current/BP-1614523203-172.17.0.2-1731789530296/current, will proceed with Du for space computation calculation, 2024-11-16T20:38:52,461 WARN [Thread-2469 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T20:38:52,463 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3300358252ea8f39 with lease ID 0x31750f7832bf8cbc: Processing first storage report for DS-3060d5e4-b2c0-43f6-a6ca-754e67f0eec8 from datanode DatanodeRegistration(127.0.0.1:41377, datanodeUuid=344a6968-8040-4c53-800e-c7e091c9dc25, infoPort=43853, infoSecurePort=0, ipcPort=44775, storageInfo=lv=-57;cid=testClusterID;nsid=1438436950;c=1731789530296) 2024-11-16T20:38:52,463 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3300358252ea8f39 with lease ID 0x31750f7832bf8cbc: from storage DS-3060d5e4-b2c0-43f6-a6ca-754e67f0eec8 node DatanodeRegistration(127.0.0.1:41377, datanodeUuid=344a6968-8040-4c53-800e-c7e091c9dc25, infoPort=43853, infoSecurePort=0, ipcPort=44775, storageInfo=lv=-57;cid=testClusterID;nsid=1438436950;c=1731789530296), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:38:52,463 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3300358252ea8f39 with lease ID 0x31750f7832bf8cbc: Processing first storage report for DS-38231a92-113d-41e1-bf46-186ccab8c282 from datanode DatanodeRegistration(127.0.0.1:41377, datanodeUuid=344a6968-8040-4c53-800e-c7e091c9dc25, infoPort=43853, infoSecurePort=0, ipcPort=44775, storageInfo=lv=-57;cid=testClusterID;nsid=1438436950;c=1731789530296) 2024-11-16T20:38:52,463 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3300358252ea8f39 with lease ID 0x31750f7832bf8cbc: from storage DS-38231a92-113d-41e1-bf46-186ccab8c282 node DatanodeRegistration(127.0.0.1:41377, datanodeUuid=344a6968-8040-4c53-800e-c7e091c9dc25, infoPort=43853, infoSecurePort=0, ipcPort=44775, storageInfo=lv=-57;cid=testClusterID;nsid=1438436950;c=1731789530296), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:38:52,543 WARN [Thread-2516 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/cluster_05851c2a-d90d-4273-fcba-e46c749ce964/data/data3/current/BP-1614523203-172.17.0.2-1731789530296/current, will proceed with Du for space computation calculation, 2024-11-16T20:38:52,543 WARN [Thread-2517 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/cluster_05851c2a-d90d-4273-fcba-e46c749ce964/data/data4/current/BP-1614523203-172.17.0.2-1731789530296/current, will proceed with Du for space computation calculation, 2024-11-16T20:38:52,569 WARN [Thread-2492 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T20:38:52,570 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xccef5e777f21cabd with lease ID 0x31750f7832bf8cbd: Processing first storage report for DS-22b3249d-0db4-40dc-9cc4-4f3a1c4adba3 from datanode DatanodeRegistration(127.0.0.1:42435, datanodeUuid=e6e02dbc-328b-485a-a54c-8394e2fba484, infoPort=41683, infoSecurePort=0, ipcPort=36113, storageInfo=lv=-57;cid=testClusterID;nsid=1438436950;c=1731789530296) 2024-11-16T20:38:52,570 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xccef5e777f21cabd with lease ID 0x31750f7832bf8cbd: from storage DS-22b3249d-0db4-40dc-9cc4-4f3a1c4adba3 node DatanodeRegistration(127.0.0.1:42435, datanodeUuid=e6e02dbc-328b-485a-a54c-8394e2fba484, infoPort=41683, infoSecurePort=0, ipcPort=36113, storageInfo=lv=-57;cid=testClusterID;nsid=1438436950;c=1731789530296), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:38:52,570 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xccef5e777f21cabd with lease ID 0x31750f7832bf8cbd: Processing first storage report for DS-61df0e4f-8a48-4abc-877a-36a83d75d5bc from datanode DatanodeRegistration(127.0.0.1:42435, datanodeUuid=e6e02dbc-328b-485a-a54c-8394e2fba484, infoPort=41683, infoSecurePort=0, ipcPort=36113, storageInfo=lv=-57;cid=testClusterID;nsid=1438436950;c=1731789530296) 2024-11-16T20:38:52,570 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xccef5e777f21cabd with lease ID 0x31750f7832bf8cbd: from storage DS-61df0e4f-8a48-4abc-877a-36a83d75d5bc node DatanodeRegistration(127.0.0.1:42435, datanodeUuid=e6e02dbc-328b-485a-a54c-8394e2fba484, infoPort=41683, infoSecurePort=0, ipcPort=36113, storageInfo=lv=-57;cid=testClusterID;nsid=1438436950;c=1731789530296), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T20:38:52,659 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6 2024-11-16T20:38:52,663 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/cluster_05851c2a-d90d-4273-fcba-e46c749ce964/zookeeper_0, clientPort=53778, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/cluster_05851c2a-d90d-4273-fcba-e46c749ce964/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/cluster_05851c2a-d90d-4273-fcba-e46c749ce964/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T20:38:52,664 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53778 2024-11-16T20:38:52,664 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:38:52,665 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:38:52,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741825_1001 (size=7) 2024-11-16T20:38:52,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741825_1001 (size=7) 2024-11-16T20:38:52,675 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c with version=8 2024-11-16T20:38:52,675 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38761/user/jenkins/test-data/3666472d-5994-c87f-0075-374dd648bdbc/hbase-staging 2024-11-16T20:38:52,678 INFO [Time-limited test {}] client.ConnectionUtils(128): master/40c018648b21:0 server-side Connection retries=45 2024-11-16T20:38:52,678 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:38:52,678 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T20:38:52,678 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T20:38:52,678 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:38:52,678 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T20:38:52,678 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T20:38:52,678 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T20:38:52,679 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45723 2024-11-16T20:38:52,680 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45723 connecting to ZooKeeper ensemble=127.0.0.1:53778 2024-11-16T20:38:52,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:457230x0, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T20:38:52,743 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45723-0x101455f29a10000 connected 2024-11-16T20:38:52,828 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:38:52,830 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:38:52,832 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:38:52,832 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c, hbase.cluster.distributed=false 2024-11-16T20:38:52,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:52,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:38:52,834 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T20:38:52,835 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45723 2024-11-16T20:38:52,835 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45723 2024-11-16T20:38:52,835 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45723 2024-11-16T20:38:52,836 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45723 2024-11-16T20:38:52,836 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45723 2024-11-16T20:38:52,851 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/40c018648b21:0 server-side Connection retries=45 2024-11-16T20:38:52,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:38:52,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T20:38:52,851 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T20:38:52,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T20:38:52,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T20:38:52,851 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T20:38:52,851 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T20:38:52,852 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44211 2024-11-16T20:38:52,853 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44211 connecting to ZooKeeper ensemble=127.0.0.1:53778 2024-11-16T20:38:52,853 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:38:52,854 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:38:52,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:442110x0, quorum=127.0.0.1:53778, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T20:38:52,860 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:442110x0, quorum=127.0.0.1:53778, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:38:52,860 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44211-0x101455f29a10001 connected 2024-11-16T20:38:52,860 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T20:38:52,861 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T20:38:52,861 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T20:38:52,862 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T20:38:52,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44211 2024-11-16T20:38:52,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44211 2024-11-16T20:38:52,868 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44211 2024-11-16T20:38:52,868 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44211 2024-11-16T20:38:52,869 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44211 2024-11-16T20:38:52,883 DEBUG [M:0;40c018648b21:45723 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;40c018648b21:45723 2024-11-16T20:38:52,884 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/40c018648b21,45723,1731789532677 2024-11-16T20:38:52,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:38:52,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:38:52,891 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/40c018648b21,45723,1731789532677 2024-11-16T20:38:52,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T20:38:52,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:52,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:52,902 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T20:38:52,902 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/40c018648b21,45723,1731789532677 from backup master directory 2024-11-16T20:38:52,942 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T20:38:52,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,945 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,970 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,970 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,975 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,975 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,975 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T20:38:52,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:38:52,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/40c018648b21,45723,1731789532677 2024-11-16T20:38:52,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T20:38:52,991 WARN [master/40c018648b21:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T20:38:52,991 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=40c018648b21,45723,1731789532677 2024-11-16T20:38:52,994 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/hbase.id] with ID: a784b55d-2b9b-4408-ba0b-6f3fe114e925 2024-11-16T20:38:52,994 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/.tmp/hbase.id 2024-11-16T20:38:52,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741826_1002 (size=42) 2024-11-16T20:38:52,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741826_1002 (size=42) 2024-11-16T20:38:53,000 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/.tmp/hbase.id]:[hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/hbase.id] 2024-11-16T20:38:53,009 INFO [master/40c018648b21:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:38:53,009 INFO [master/40c018648b21:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T20:38:53,010 INFO [master/40c018648b21:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-16T20:38:53,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:53,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:53,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741827_1003 (size=196) 2024-11-16T20:38:53,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741827_1003 (size=196) 2024-11-16T20:38:53,051 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T20:38:53,051 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T20:38:53,052 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:38:53,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741828_1004 (size=1189) 2024-11-16T20:38:53,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741828_1004 (size=1189) 2024-11-16T20:38:53,066 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store 2024-11-16T20:38:53,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741829_1005 (size=34) 2024-11-16T20:38:53,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741829_1005 (size=34) 2024-11-16T20:38:53,072 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:38:53,073 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T20:38:53,073 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:38:53,073 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:38:53,073 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T20:38:53,073 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:38:53,073 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T20:38:53,073 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731789533073Disabling compacts and flushes for region at 1731789533073Disabling writes for close at 1731789533073Writing region close event to WAL at 1731789533073Closed at 1731789533073 2024-11-16T20:38:53,074 WARN [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/.initializing 2024-11-16T20:38:53,074 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/WALs/40c018648b21,45723,1731789532677 2024-11-16T20:38:53,076 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C45723%2C1731789532677, suffix=, logDir=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/WALs/40c018648b21,45723,1731789532677, archiveDir=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/oldWALs, maxLogs=10 2024-11-16T20:38:53,076 INFO [master/40c018648b21:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C45723%2C1731789532677.1731789533076 2024-11-16T20:38:53,086 INFO [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/WALs/40c018648b21,45723,1731789532677/40c018648b21%2C45723%2C1731789532677.1731789533076 2024-11-16T20:38:53,088 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41683:41683),(127.0.0.1/127.0.0.1:43853:43853)] 2024-11-16T20:38:53,089 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T20:38:53,089 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:38:53,089 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:38:53,089 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:38:53,091 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:38:53,092 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T20:38:53,092 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:53,093 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:38:53,093 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:38:53,094 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T20:38:53,094 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:53,095 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:38:53,095 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:38:53,096 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T20:38:53,096 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:53,096 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:38:53,097 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:38:53,098 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T20:38:53,098 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:53,098 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T20:38:53,099 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:38:53,099 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:38:53,100 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:38:53,101 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:38:53,101 DEBUG [master/40c018648b21:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:38:53,101 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T20:38:53,102 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T20:38:53,104 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:38:53,105 INFO [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=863176, jitterRate=0.09758622944355011}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T20:38:53,105 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731789533090Initializing all the Stores at 1731789533090Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789533090Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789533091 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789533091Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789533091Cleaning up temporary data from old regions at 1731789533101 (+10 ms)Region opened successfully at 1731789533105 (+4 ms) 2024-11-16T20:38:53,105 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T20:38:53,108 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1470057c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=40c018648b21/172.17.0.2:0 2024-11-16T20:38:53,109 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T20:38:53,109 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T20:38:53,109 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T20:38:53,109 INFO [master/40c018648b21:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T20:38:53,110 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T20:38:53,110 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T20:38:53,110 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T20:38:53,112 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T20:38:53,113 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T20:38:53,123 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T20:38:53,123 INFO [master/40c018648b21:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T20:38:53,124 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T20:38:53,133 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T20:38:53,133 INFO [master/40c018648b21:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T20:38:53,134 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T20:38:53,144 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T20:38:53,144 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T20:38:53,154 DEBUG 
[master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T20:38:53,156 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T20:38:53,165 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T20:38:53,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T20:38:53,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T20:38:53,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:53,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:53,176 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=40c018648b21,45723,1731789532677, sessionid=0x101455f29a10000, setting cluster-up flag (Was=false) 2024-11-16T20:38:53,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:53,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:53,228 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T20:38:53,230 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=40c018648b21,45723,1731789532677 2024-11-16T20:38:53,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:53,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:53,281 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T20:38:53,282 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=40c018648b21,45723,1731789532677 2024-11-16T20:38:53,283 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T20:38:53,284 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T20:38:53,284 INFO [master/40c018648b21:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T20:38:53,284 INFO [master/40c018648b21:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T20:38:53,285 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 40c018648b21,45723,1731789532677 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T20:38:53,286 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:38:53,286 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:38:53,286 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:38:53,286 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/40c018648b21:0, corePoolSize=5, maxPoolSize=5 2024-11-16T20:38:53,286 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/40c018648b21:0, corePoolSize=10, maxPoolSize=10 2024-11-16T20:38:53,286 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:38:53,286 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/40c018648b21:0, corePoolSize=2, maxPoolSize=2 2024-11-16T20:38:53,286 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/40c018648b21:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T20:38:53,288 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:38:53,288 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T20:38:53,289 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:53,289 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T20:38:53,290 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731789563290 2024-11-16T20:38:53,291 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T20:38:53,291 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T20:38:53,291 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T20:38:53,291 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T20:38:53,291 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T20:38:53,291 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T20:38:53,291 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:53,291 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T20:38:53,291 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T20:38:53,291 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T20:38:53,292 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T20:38:53,292 INFO [master/40c018648b21:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T20:38:53,297 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789533292,5,FailOnTimeoutGroup] 2024-11-16T20:38:53,297 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789533297,5,FailOnTimeoutGroup] 2024-11-16T20:38:53,298 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:53,298 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T20:38:53,298 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:53,298 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-16T20:38:53,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741831_1007 (size=1321) 2024-11-16T20:38:53,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741831_1007 (size=1321) 2024-11-16T20:38:53,303 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T20:38:53,303 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c 2024-11-16T20:38:53,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741832_1008 (size=32) 2024-11-16T20:38:53,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741832_1008 (size=32) 2024-11-16T20:38:53,314 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:38:53,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T20:38:53,316 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T20:38:53,316 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:53,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:38:53,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T20:38:53,318 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T20:38:53,318 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:53,318 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:38:53,318 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T20:38:53,320 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T20:38:53,320 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:53,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:38:53,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T20:38:53,323 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T20:38:53,323 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:53,324 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:38:53,324 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T20:38:53,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/data/hbase/meta/1588230740 2024-11-16T20:38:53,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/data/hbase/meta/1588230740 2024-11-16T20:38:53,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T20:38:53,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T20:38:53,327 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-16T20:38:53,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T20:38:53,331 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T20:38:53,332 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=861960, jitterRate=0.09603939950466156}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T20:38:53,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731789533314Initializing all the Stores at 1731789533315 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789533315Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789533315Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789533315Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789533315Cleaning up temporary data from old regions at 1731789533327 (+12 ms)Region opened successfully at 1731789533332 (+5 ms) 2024-11-16T20:38:53,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T20:38:53,332 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T20:38:53,333 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T20:38:53,333 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T20:38:53,333 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T20:38:53,333 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T20:38:53,333 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731789533332Disabling compacts and flushes for region at 1731789533332Disabling writes for close at 1731789533333 (+1 ms)Writing 
region close event to WAL at 1731789533333Closed at 1731789533333 2024-11-16T20:38:53,334 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:38:53,334 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T20:38:53,334 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T20:38:53,335 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T20:38:53,336 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T20:38:53,371 INFO [RS:0;40c018648b21:44211 {}] regionserver.HRegionServer(746): ClusterId : a784b55d-2b9b-4408-ba0b-6f3fe114e925 2024-11-16T20:38:53,371 DEBUG [RS:0;40c018648b21:44211 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T20:38:53,434 DEBUG [RS:0;40c018648b21:44211 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T20:38:53,434 DEBUG [RS:0;40c018648b21:44211 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T20:38:53,460 DEBUG [RS:0;40c018648b21:44211 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T20:38:53,461 DEBUG [RS:0;40c018648b21:44211 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@787564c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=40c018648b21/172.17.0.2:0 2024-11-16T20:38:53,471 DEBUG [RS:0;40c018648b21:44211 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;40c018648b21:44211 2024-11-16T20:38:53,471 INFO [RS:0;40c018648b21:44211 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T20:38:53,471 INFO [RS:0;40c018648b21:44211 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T20:38:53,471 DEBUG [RS:0;40c018648b21:44211 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T20:38:53,471 INFO [RS:0;40c018648b21:44211 {}] regionserver.HRegionServer(2659): reportForDuty to master=40c018648b21,45723,1731789532677 with port=44211, startcode=1731789532850 2024-11-16T20:38:53,472 DEBUG [RS:0;40c018648b21:44211 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T20:38:53,473 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50977, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T20:38:53,474 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45723 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 40c018648b21,44211,1731789532850 2024-11-16T20:38:53,474 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45723 {}] master.ServerManager(517): Registering regionserver=40c018648b21,44211,1731789532850 2024-11-16T20:38:53,475 DEBUG [RS:0;40c018648b21:44211 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c 2024-11-16T20:38:53,475 DEBUG [RS:0;40c018648b21:44211 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45225 2024-11-16T20:38:53,475 DEBUG [RS:0;40c018648b21:44211 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T20:38:53,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T20:38:53,481 DEBUG [RS:0;40c018648b21:44211 {}] zookeeper.ZKUtil(111): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/40c018648b21,44211,1731789532850 2024-11-16T20:38:53,481 WARN [RS:0;40c018648b21:44211 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T20:38:53,481 INFO [RS:0;40c018648b21:44211 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:38:53,481 DEBUG [RS:0;40c018648b21:44211 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/WALs/40c018648b21,44211,1731789532850 2024-11-16T20:38:53,482 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [40c018648b21,44211,1731789532850] 2024-11-16T20:38:53,485 INFO [RS:0;40c018648b21:44211 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T20:38:53,486 INFO [RS:0;40c018648b21:44211 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T20:38:53,486 INFO [RS:0;40c018648b21:44211 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T20:38:53,487 WARN [40c018648b21:45723 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-16T20:38:53,486 INFO [RS:0;40c018648b21:44211 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:53,487 INFO [RS:0;40c018648b21:44211 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T20:38:53,487 INFO [RS:0;40c018648b21:44211 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T20:38:53,487 INFO [RS:0;40c018648b21:44211 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:53,488 DEBUG [RS:0;40c018648b21:44211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:38:53,488 DEBUG [RS:0;40c018648b21:44211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:38:53,488 DEBUG [RS:0;40c018648b21:44211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:38:53,488 DEBUG [RS:0;40c018648b21:44211 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:38:53,488 DEBUG [RS:0;40c018648b21:44211 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:38:53,488 DEBUG [RS:0;40c018648b21:44211 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/40c018648b21:0, corePoolSize=2, maxPoolSize=2 2024-11-16T20:38:53,488 DEBUG [RS:0;40c018648b21:44211 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:38:53,488 DEBUG [RS:0;40c018648b21:44211 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:38:53,488 DEBUG [RS:0;40c018648b21:44211 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:38:53,488 DEBUG [RS:0;40c018648b21:44211 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:38:53,488 DEBUG [RS:0;40c018648b21:44211 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:38:53,488 DEBUG [RS:0;40c018648b21:44211 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/40c018648b21:0, corePoolSize=1, maxPoolSize=1 2024-11-16T20:38:53,488 DEBUG [RS:0;40c018648b21:44211 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/40c018648b21:0, corePoolSize=3, maxPoolSize=3 2024-11-16T20:38:53,488 DEBUG [RS:0;40c018648b21:44211 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/40c018648b21:0, corePoolSize=3, maxPoolSize=3 2024-11-16T20:38:53,489 INFO [RS:0;40c018648b21:44211 {}] hbase.ChoreService(168): Chore ScheduledChore 
name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:53,489 INFO [RS:0;40c018648b21:44211 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:53,489 INFO [RS:0;40c018648b21:44211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:53,489 INFO [RS:0;40c018648b21:44211 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:53,489 INFO [RS:0;40c018648b21:44211 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:53,489 INFO [RS:0;40c018648b21:44211 {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,44211,1731789532850-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T20:38:53,504 INFO [RS:0;40c018648b21:44211 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T20:38:53,504 INFO [RS:0;40c018648b21:44211 {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,44211,1731789532850-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:53,504 INFO [RS:0;40c018648b21:44211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:53,505 INFO [RS:0;40c018648b21:44211 {}] regionserver.Replication(171): 40c018648b21,44211,1731789532850 started 2024-11-16T20:38:53,516 INFO [RS:0;40c018648b21:44211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T20:38:53,516 INFO [RS:0;40c018648b21:44211 {}] regionserver.HRegionServer(1482): Serving as 40c018648b21,44211,1731789532850, RpcServer on 40c018648b21/172.17.0.2:44211, sessionid=0x101455f29a10001 2024-11-16T20:38:53,516 DEBUG [RS:0;40c018648b21:44211 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T20:38:53,516 DEBUG [RS:0;40c018648b21:44211 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 40c018648b21,44211,1731789532850 2024-11-16T20:38:53,516 DEBUG [RS:0;40c018648b21:44211 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '40c018648b21,44211,1731789532850' 2024-11-16T20:38:53,517 DEBUG [RS:0;40c018648b21:44211 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T20:38:53,517 DEBUG [RS:0;40c018648b21:44211 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T20:38:53,518 DEBUG [RS:0;40c018648b21:44211 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T20:38:53,518 DEBUG [RS:0;40c018648b21:44211 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T20:38:53,518 DEBUG [RS:0;40c018648b21:44211 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 40c018648b21,44211,1731789532850 2024-11-16T20:38:53,518 DEBUG [RS:0;40c018648b21:44211 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '40c018648b21,44211,1731789532850' 2024-11-16T20:38:53,518 DEBUG [RS:0;40c018648b21:44211 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T20:38:53,518 DEBUG [RS:0;40c018648b21:44211 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T20:38:53,519 DEBUG [RS:0;40c018648b21:44211 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T20:38:53,519 INFO [RS:0;40c018648b21:44211 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T20:38:53,519 INFO [RS:0;40c018648b21:44211 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-16T20:38:53,622 INFO [RS:0;40c018648b21:44211 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C44211%2C1731789532850, suffix=, logDir=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/WALs/40c018648b21,44211,1731789532850, archiveDir=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/oldWALs, maxLogs=32 2024-11-16T20:38:53,623 INFO [RS:0;40c018648b21:44211 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C44211%2C1731789532850.1731789533623 2024-11-16T20:38:53,633 INFO [RS:0;40c018648b21:44211 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/WALs/40c018648b21,44211,1731789532850/40c018648b21%2C44211%2C1731789532850.1731789533623 2024-11-16T20:38:53,635 DEBUG [RS:0;40c018648b21:44211 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43853:43853),(127.0.0.1/127.0.0.1:41683:41683)] 2024-11-16T20:38:53,737 DEBUG [40c018648b21:45723 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T20:38:53,738 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=40c018648b21,44211,1731789532850 2024-11-16T20:38:53,740 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 40c018648b21,44211,1731789532850, state=OPENING 2024-11-16T20:38:53,753 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T20:38:53,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,46419,1731789328882/40c018648b21%2C46419%2C1731789328882.meta.1731789329897.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T20:38:53,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33297/user/jenkins/test-data/8b53ef5d-0c37-dcdf-89b7-ede99af1fb65/WALs/40c018648b21,39701,1731789330052/40c018648b21%2C39701%2C1731789330052.1731789330297 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T20:38:53,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:53,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:53,840 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:38:53,841 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:38:53,841 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T20:38:53,841 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=40c018648b21,44211,1731789532850}] 2024-11-16T20:38:53,996 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T20:38:54,000 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33877, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T20:38:54,009 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T20:38:54,009 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:38:54,011 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=40c018648b21%2C44211%2C1731789532850.meta, suffix=.meta, logDir=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/WALs/40c018648b21,44211,1731789532850, archiveDir=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/oldWALs, maxLogs=32 2024-11-16T20:38:54,011 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 40c018648b21%2C44211%2C1731789532850.meta.1731789534011.meta 2024-11-16T20:38:54,016 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/WALs/40c018648b21,44211,1731789532850/40c018648b21%2C44211%2C1731789532850.meta.1731789534011.meta 2024-11-16T20:38:54,019 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41683:41683),(127.0.0.1/127.0.0.1:43853:43853)] 2024-11-16T20:38:54,021 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): 
Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T20:38:54,021 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T20:38:54,021 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T20:38:54,021 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-16T20:38:54,021 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T20:38:54,021 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T20:38:54,021 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T20:38:54,021 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T20:38:54,023 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T20:38:54,023 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T20:38:54,023 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:54,024 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:38:54,024 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for 
column family ns of region 1588230740 2024-11-16T20:38:54,025 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T20:38:54,025 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:54,025 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:38:54,025 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T20:38:54,026 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T20:38:54,026 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:54,026 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:38:54,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T20:38:54,027 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered 
compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T20:38:54,027 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T20:38:54,028 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T20:38:54,028 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T20:38:54,028 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/data/hbase/meta/1588230740 2024-11-16T20:38:54,029 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/data/hbase/meta/1588230740 2024-11-16T20:38:54,031 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T20:38:54,031 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T20:38:54,031 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
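[Editor's note] The FlushLargeStoresPolicy message above reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta table descriptor, so the policy falls back to the memstore flush size divided by the number of families (16.0 M here). For context only, a minimal sketch (not taken from this test) of how that bound could be set on a table descriptor through the public TableDescriptorBuilder API; the table name, family name, and value are placeholders:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushBoundExample {
  public static TableDescriptor withPerFamilyFlushBound() {
    // Placeholder table/family names; the property key is the one named in the log line above.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        // Lower bound (bytes) below which a column family is not selected for a large-stores flush.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16 * 1024 * 1024))
        .build();
  }
}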
2024-11-16T20:38:54,032 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T20:38:54,033 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773969, jitterRate=-0.015848785638809204}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T20:38:54,033 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T20:38:54,033 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731789534021Writing region info on filesystem at 1731789534021Initializing all the Stores at 1731789534022 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789534022Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789534023 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731789534023Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731789534023Cleaning up temporary data from old regions at 1731789534031 (+8 ms)Running coprocessor post-open hooks at 1731789534033 (+2 ms)Region opened successfully at 1731789534033 2024-11-16T20:38:54,034 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731789533996 2024-11-16T20:38:54,036 DEBUG [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T20:38:54,036 INFO [RS_OPEN_META-regionserver/40c018648b21:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T20:38:54,037 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=40c018648b21,44211,1731789532850 2024-11-16T20:38:54,037 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 40c018648b21,44211,1731789532850, state=OPEN 2024-11-16T20:38:54,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T20:38:54,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T20:38:54,080 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=40c018648b21,44211,1731789532850 2024-11-16T20:38:54,080 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:38:54,080 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T20:38:54,082 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T20:38:54,082 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=40c018648b21,44211,1731789532850 in 239 msec 2024-11-16T20:38:54,084 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T20:38:54,084 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 748 msec 2024-11-16T20:38:54,085 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T20:38:54,085 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T20:38:54,086 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T20:38:54,086 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=40c018648b21,44211,1731789532850, seqNum=-1] 2024-11-16T20:38:54,086 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T20:38:54,087 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39971, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T20:38:54,092 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 807 msec 2024-11-16T20:38:54,092 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731789534092, completionTime=-1 2024-11-16T20:38:54,092 INFO 
[master/40c018648b21:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T20:38:54,092 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T20:38:54,094 INFO [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T20:38:54,094 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731789594094 2024-11-16T20:38:54,094 INFO [master/40c018648b21:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731789654094 2024-11-16T20:38:54,094 INFO [master/40c018648b21:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-16T20:38:54,094 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,45723,1731789532677-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:54,094 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,45723,1731789532677-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:54,094 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,45723,1731789532677-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:54,094 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-40c018648b21:45723, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:54,094 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:54,094 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:54,096 DEBUG [master/40c018648b21:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T20:38:54,097 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.106sec 2024-11-16T20:38:54,097 INFO [master/40c018648b21:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T20:38:54,097 INFO [master/40c018648b21:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T20:38:54,098 INFO [master/40c018648b21:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T20:38:54,098 INFO [master/40c018648b21:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-16T20:38:54,098 INFO [master/40c018648b21:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T20:38:54,098 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,45723,1731789532677-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T20:38:54,098 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,45723,1731789532677-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T20:38:54,100 DEBUG [master/40c018648b21:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T20:38:54,100 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T20:38:54,100 INFO [master/40c018648b21:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=40c018648b21,45723,1731789532677-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T20:38:54,171 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5de62a29, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:38:54,171 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 40c018648b21,45723,-1 for getting cluster id 2024-11-16T20:38:54,171 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T20:38:54,173 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a784b55d-2b9b-4408-ba0b-6f3fe114e925' 2024-11-16T20:38:54,173 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T20:38:54,173 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a784b55d-2b9b-4408-ba0b-6f3fe114e925" 2024-11-16T20:38:54,174 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@162e873b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:38:54,174 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [40c018648b21,45723,-1] 2024-11-16T20:38:54,174 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T20:38:54,174 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:38:54,175 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41874, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T20:38:54,176 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1039770b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T20:38:54,177 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T20:38:54,178 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=40c018648b21,44211,1731789532850, seqNum=-1] 2024-11-16T20:38:54,179 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T20:38:54,180 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44638, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T20:38:54,183 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=40c018648b21,45723,1731789532677 2024-11-16T20:38:54,184 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T20:38:54,188 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T20:38:54,188 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T20:38:54,190 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/WALs/test.com,8080,1, archiveDir=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/oldWALs, maxLogs=32 2024-11-16T20:38:54,190 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731789534190 2024-11-16T20:38:54,194 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/WALs/test.com,8080,1/test.com%2C8080%2C1.1731789534190 2024-11-16T20:38:54,195 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41683:41683),(127.0.0.1/127.0.0.1:43853:43853)] 2024-11-16T20:38:54,196 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731789534196 2024-11-16T20:38:54,201 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,201 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,201 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,201 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,201 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,201 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/WALs/test.com,8080,1/test.com%2C8080%2C1.1731789534190 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/WALs/test.com,8080,1/test.com%2C8080%2C1.1731789534196 2024-11-16T20:38:54,202 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43853:43853),(127.0.0.1/127.0.0.1:41683:41683)] 2024-11-16T20:38:54,202 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/WALs/test.com,8080,1/test.com%2C8080%2C1.1731789534190 is not closed yet, will try archiving it next time 2024-11-16T20:38:54,203 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,203 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,203 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,203 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741835_1011 (size=93) 2024-11-16T20:38:54,203 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741835_1011 (size=93) 2024-11-16T20:38:54,204 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/WALs/test.com,8080,1/test.com%2C8080%2C1.1731789534190 to hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/oldWALs/test.com%2C8080%2C1.1731789534190 2024-11-16T20:38:54,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741836_1012 (size=93) 2024-11-16T20:38:54,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741836_1012 (size=93) 2024-11-16T20:38:54,207 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/oldWALs 2024-11-16T20:38:54,207 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731789534196) 2024-11-16T20:38:54,207 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T20:38:54,207 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
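[Editor's note] The entries above show the test creating a standalone FSHLog (prefix test.com%2C8080%2C1), rolling it while it still has zero entries, archiving the previous file to oldWALs, and closing it. A rough sketch of driving the same create/roll/close cycle through the public WALFactory API, assuming the WALFactory(Configuration, String), getWAL(RegionInfo), rollWriter(), and close() signatures; the factory id and table name below are placeholders, not values from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class WalRollSketch {
  public static void rollOnce() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The factory id becomes part of the WAL directory name, similar to "test.com,8080,1" above.
    WALFactory walFactory = new WALFactory(conf, "example-wal");
    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf("example")).build();
    WAL wal = walFactory.getWAL(region);
    // Forces a new writer; the previous file becomes eligible for archival to oldWALs once synced.
    wal.rollWriter();
    walFactory.close();
  }
}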
2024-11-16T20:38:54,208 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:38:54,208 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:38:54,208 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:38:54,208 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-16T20:38:54,208 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T20:38:54,208 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=239043860, stopped=false 2024-11-16T20:38:54,208 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=40c018648b21,45723,1731789532677 2024-11-16T20:38:54,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T20:38:54,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T20:38:54,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:54,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:54,228 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T20:38:54,228 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T20:38:54,228 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:38:54,228 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:38:54,228 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:38:54,228 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '40c018648b21,44211,1731789532850' ***** 2024-11-16T20:38:54,229 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T20:38:54,229 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T20:38:54,229 INFO [RS:0;40c018648b21:44211 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T20:38:54,229 INFO [RS:0;40c018648b21:44211 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T20:38:54,229 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T20:38:54,229 INFO [RS:0;40c018648b21:44211 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T20:38:54,229 INFO [RS:0;40c018648b21:44211 {}] regionserver.HRegionServer(959): stopping server 40c018648b21,44211,1731789532850 2024-11-16T20:38:54,229 INFO [RS:0;40c018648b21:44211 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T20:38:54,229 INFO [RS:0;40c018648b21:44211 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;40c018648b21:44211. 
2024-11-16T20:38:54,229 DEBUG [RS:0;40c018648b21:44211 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T20:38:54,229 DEBUG [RS:0;40c018648b21:44211 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:38:54,229 INFO [RS:0;40c018648b21:44211 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T20:38:54,229 INFO [RS:0;40c018648b21:44211 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T20:38:54,229 INFO [RS:0;40c018648b21:44211 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
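[Editor's note] The call stacks above originate from AbstractTestLogRolling.tearDown, which closes the async cluster connection and shuts the mini cluster down via HBaseTestingUtil. A minimal JUnit 4-style sketch of that lifecycle; only shutdownMiniCluster is visible in this log's stack traces, and the no-arg constructor plus startMiniCluster(1) are assumptions based on the single-master, single-regionserver cluster reported at shutdown:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycleSketch {
  // Shared test utility, as used by the HBase test harness seen in the stack traces above.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Assumed counterpart of the "Minicluster is up" message logged earlier in this run.
    testUtil.startMiniCluster(1);
  }

  @After
  public void tearDown() throws Exception {
    // Matches HBaseTestingUtil.shutdownMiniCluster in the tearDown stack trace above.
    testUtil.shutdownMiniCluster();
  }
}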
2024-11-16T20:38:54,229 INFO [RS:0;40c018648b21:44211 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T20:38:54,229 INFO [RS:0;40c018648b21:44211 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-16T20:38:54,229 DEBUG [RS:0;40c018648b21:44211 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-16T20:38:54,230 DEBUG [RS:0;40c018648b21:44211 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T20:38:54,230 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T20:38:54,230 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T20:38:54,230 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T20:38:54,230 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T20:38:54,230 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T20:38:54,230 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-16T20:38:54,245 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/data/hbase/meta/1588230740/.tmp/ns/e7f417fe573942d69c4fc73775e9cdd3 is 43, key is default/ns:d/1731789534087/Put/seqid=0 2024-11-16T20:38:54,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741837_1013 (size=5153) 2024-11-16T20:38:54,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741837_1013 (size=5153) 2024-11-16T20:38:54,250 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/data/hbase/meta/1588230740/.tmp/ns/e7f417fe573942d69c4fc73775e9cdd3 2024-11-16T20:38:54,255 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/data/hbase/meta/1588230740/.tmp/ns/e7f417fe573942d69c4fc73775e9cdd3 as hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/data/hbase/meta/1588230740/ns/e7f417fe573942d69c4fc73775e9cdd3 2024-11-16T20:38:54,261 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/data/hbase/meta/1588230740/ns/e7f417fe573942d69c4fc73775e9cdd3, entries=2, sequenceid=6, filesize=5.0 K 2024-11-16T20:38:54,262 INFO 
[RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 32ms, sequenceid=6, compaction requested=false 2024-11-16T20:38:54,262 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T20:38:54,267 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T20:38:54,267 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T20:38:54,267 INFO [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T20:38:54,267 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731789534230Running coprocessor pre-close hooks at 1731789534230Disabling compacts and flushes for region at 1731789534230Disabling writes for close at 1731789534230Obtaining lock to block concurrent updates at 1731789534230Preparing flush snapshotting stores in 1588230740 at 1731789534230Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731789534230Flushing stores of hbase:meta,,1.1588230740 at 1731789534230Flushing 1588230740/ns: creating writer at 1731789534231 (+1 ms)Flushing 1588230740/ns: appending metadata at 1731789534245 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731789534245Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f28e189: reopening flushed file at 1731789534255 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 32ms, sequenceid=6, compaction requested=false at 1731789534262 (+7 ms)Writing region close event to WAL at 1731789534263 (+1 ms)Running coprocessor post-close hooks at 1731789534267 (+4 ms)Closed at 1731789534267 2024-11-16T20:38:54,267 DEBUG [RS_CLOSE_META-regionserver/40c018648b21:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T20:38:54,430 INFO [RS:0;40c018648b21:44211 {}] regionserver.HRegionServer(976): stopping server 40c018648b21,44211,1731789532850; all regions closed. 
2024-11-16T20:38:54,430 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,430 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,430 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,431 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,431 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741834_1010 (size=1152) 2024-11-16T20:38:54,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741834_1010 (size=1152) 2024-11-16T20:38:54,435 DEBUG [RS:0;40c018648b21:44211 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/oldWALs 2024-11-16T20:38:54,435 INFO [RS:0;40c018648b21:44211 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 40c018648b21%2C44211%2C1731789532850.meta:.meta(num 1731789534011) 2024-11-16T20:38:54,435 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,435 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,435 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,435 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,435 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741833_1009 (size=93) 2024-11-16T20:38:54,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741833_1009 (size=93) 2024-11-16T20:38:54,439 DEBUG [RS:0;40c018648b21:44211 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/oldWALs 2024-11-16T20:38:54,439 INFO [RS:0;40c018648b21:44211 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 40c018648b21%2C44211%2C1731789532850:(num 1731789533623) 2024-11-16T20:38:54,439 DEBUG [RS:0;40c018648b21:44211 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T20:38:54,439 INFO [RS:0;40c018648b21:44211 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T20:38:54,439 INFO [RS:0;40c018648b21:44211 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T20:38:54,440 INFO [RS:0;40c018648b21:44211 {}] hbase.ChoreService(370): Chore service for: regionserver/40c018648b21:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-16T20:38:54,440 INFO [RS:0;40c018648b21:44211 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T20:38:54,440 INFO [regionserver/40c018648b21:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T20:38:54,440 INFO [RS:0;40c018648b21:44211 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44211 2024-11-16T20:38:54,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T20:38:54,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/40c018648b21,44211,1731789532850 2024-11-16T20:38:54,449 INFO [RS:0;40c018648b21:44211 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T20:38:54,459 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [40c018648b21,44211,1731789532850] 2024-11-16T20:38:54,470 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/40c018648b21,44211,1731789532850 already deleted, retry=false 2024-11-16T20:38:54,470 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 40c018648b21,44211,1731789532850 expired; onlineServers=0 2024-11-16T20:38:54,470 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '40c018648b21,45723,1731789532677' ***** 2024-11-16T20:38:54,470 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T20:38:54,470 INFO [M:0;40c018648b21:45723 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T20:38:54,470 INFO [M:0;40c018648b21:45723 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T20:38:54,470 DEBUG [M:0;40c018648b21:45723 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T20:38:54,470 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T20:38:54,470 DEBUG [M:0;40c018648b21:45723 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T20:38:54,470 DEBUG [master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789533297 {}] cleaner.HFileCleaner(306): Exit Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.small.0-1731789533297,5,FailOnTimeoutGroup] 2024-11-16T20:38:54,470 DEBUG [master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789533292 {}] cleaner.HFileCleaner(306): Exit Thread[master/40c018648b21:0:becomeActiveMaster-HFileCleaner.large.0-1731789533292,5,FailOnTimeoutGroup] 2024-11-16T20:38:54,470 INFO [M:0;40c018648b21:45723 {}] hbase.ChoreService(370): Chore service for: master/40c018648b21:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T20:38:54,471 INFO [M:0;40c018648b21:45723 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T20:38:54,471 DEBUG [M:0;40c018648b21:45723 {}] master.HMaster(1795): Stopping service threads 2024-11-16T20:38:54,471 INFO [M:0;40c018648b21:45723 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T20:38:54,471 INFO [M:0;40c018648b21:45723 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T20:38:54,471 INFO [M:0;40c018648b21:45723 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T20:38:54,471 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T20:38:54,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T20:38:54,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T20:38:54,481 DEBUG [M:0;40c018648b21:45723 {}] zookeeper.ZKUtil(347): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T20:38:54,481 WARN [M:0;40c018648b21:45723 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T20:38:54,481 INFO [M:0;40c018648b21:45723 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/.lastflushedseqids 2024-11-16T20:38:54,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741838_1014 (size=99) 2024-11-16T20:38:54,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741838_1014 (size=99) 2024-11-16T20:38:54,490 INFO [M:0;40c018648b21:45723 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T20:38:54,490 INFO [M:0;40c018648b21:45723 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T20:38:54,490 DEBUG [M:0;40c018648b21:45723 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T20:38:54,490 INFO [M:0;40c018648b21:45723 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:38:54,490 DEBUG [M:0;40c018648b21:45723 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:38:54,490 DEBUG [M:0;40c018648b21:45723 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T20:38:54,490 DEBUG [M:0;40c018648b21:45723 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T20:38:54,490 INFO [M:0;40c018648b21:45723 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-16T20:38:54,504 DEBUG [M:0;40c018648b21:45723 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ae8b19e8340c48c3b59864cb2e8df250 is 82, key is hbase:meta,,1/info:regioninfo/1731789534037/Put/seqid=0 2024-11-16T20:38:54,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741839_1015 (size=5672) 2024-11-16T20:38:54,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741839_1015 (size=5672) 2024-11-16T20:38:54,509 INFO [M:0;40c018648b21:45723 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ae8b19e8340c48c3b59864cb2e8df250 2024-11-16T20:38:54,526 DEBUG [M:0;40c018648b21:45723 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d34ff501adca4f81a91cfbd18f801391 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731789534091/Put/seqid=0 2024-11-16T20:38:54,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741840_1016 (size=5275) 2024-11-16T20:38:54,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741840_1016 (size=5275) 2024-11-16T20:38:54,530 INFO [M:0;40c018648b21:45723 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d34ff501adca4f81a91cfbd18f801391 2024-11-16T20:38:54,548 DEBUG [M:0;40c018648b21:45723 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/98e13051b4b043faa2930bb2570da142 is 69, key is 40c018648b21,44211,1731789532850/rs:state/1731789533474/Put/seqid=0 2024-11-16T20:38:54,552 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741841_1017 (size=5156) 2024-11-16T20:38:54,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741841_1017 (size=5156) 2024-11-16T20:38:54,552 INFO [M:0;40c018648b21:45723 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/98e13051b4b043faa2930bb2570da142 2024-11-16T20:38:54,560 INFO [RS:0;40c018648b21:44211 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T20:38:54,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:38:54,560 INFO [RS:0;40c018648b21:44211 {}] regionserver.HRegionServer(1031): Exiting; stopping=40c018648b21,44211,1731789532850; zookeeper connection closed. 2024-11-16T20:38:54,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44211-0x101455f29a10001, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:38:54,560 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@9b4cf6c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@9b4cf6c 2024-11-16T20:38:54,560 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T20:38:54,569 DEBUG [M:0;40c018648b21:45723 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/30c441241a5b4298a64e150ca8fb8bb2 is 52, key is load_balancer_on/state:d/1731789534186/Put/seqid=0 2024-11-16T20:38:54,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741842_1018 (size=5056) 2024-11-16T20:38:54,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741842_1018 (size=5056) 2024-11-16T20:38:54,575 INFO [M:0;40c018648b21:45723 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/30c441241a5b4298a64e150ca8fb8bb2 2024-11-16T20:38:54,579 DEBUG [M:0;40c018648b21:45723 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ae8b19e8340c48c3b59864cb2e8df250 as hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ae8b19e8340c48c3b59864cb2e8df250 2024-11-16T20:38:54,585 INFO [M:0;40c018648b21:45723 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ae8b19e8340c48c3b59864cb2e8df250, entries=8, sequenceid=29, filesize=5.5 K 2024-11-16T20:38:54,589 DEBUG [M:0;40c018648b21:45723 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d34ff501adca4f81a91cfbd18f801391 as hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d34ff501adca4f81a91cfbd18f801391 2024-11-16T20:38:54,594 INFO [M:0;40c018648b21:45723 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d34ff501adca4f81a91cfbd18f801391, entries=3, sequenceid=29, filesize=5.2 K 2024-11-16T20:38:54,596 DEBUG [M:0;40c018648b21:45723 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/98e13051b4b043faa2930bb2570da142 as hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/98e13051b4b043faa2930bb2570da142 2024-11-16T20:38:54,602 INFO [M:0;40c018648b21:45723 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/98e13051b4b043faa2930bb2570da142, entries=1, sequenceid=29, filesize=5.0 K 2024-11-16T20:38:54,603 DEBUG [M:0;40c018648b21:45723 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/30c441241a5b4298a64e150ca8fb8bb2 as hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/30c441241a5b4298a64e150ca8fb8bb2 2024-11-16T20:38:54,608 INFO [M:0;40c018648b21:45723 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45225/user/jenkins/test-data/38859eee-a0e7-2d8c-5aaa-ae1611839a1c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/30c441241a5b4298a64e150ca8fb8bb2, entries=1, sequenceid=29, filesize=4.9 K 2024-11-16T20:38:54,609 INFO [M:0;40c018648b21:45723 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 119ms, sequenceid=29, compaction requested=false 2024-11-16T20:38:54,612 INFO [M:0;40c018648b21:45723 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T20:38:54,612 DEBUG [M:0;40c018648b21:45723 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731789534490Disabling compacts and flushes for region at 1731789534490Disabling writes for close at 1731789534490Obtaining lock to block concurrent updates at 1731789534490Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731789534490Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731789534491 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731789534491Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731789534491Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731789534504 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731789534504Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731789534512 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731789534525 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731789534525Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731789534534 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731789534547 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731789534547Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731789534556 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731789534569 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731789534569Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68e8c05b: reopening flushed file at 1731789534578 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d8d332b: reopening flushed file at 1731789534585 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47dc17b4: reopening flushed file at 1731789534595 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7cbc3a91: reopening flushed file at 1731789534602 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 119ms, sequenceid=29, compaction requested=false at 1731789534609 (+7 ms)Writing region close event to WAL at 1731789534612 (+3 ms)Closed at 1731789534612 2024-11-16T20:38:54,613 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,613 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,613 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,613 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,613 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T20:38:54,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41377 is added to blk_1073741830_1006 (size=10311) 2024-11-16T20:38:54,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42435 is added to blk_1073741830_1006 (size=10311) 2024-11-16T20:38:54,616 INFO [M:0;40c018648b21:45723 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-16T20:38:54,616 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T20:38:54,616 INFO [M:0;40c018648b21:45723 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45723 2024-11-16T20:38:54,616 INFO [M:0;40c018648b21:45723 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T20:38:54,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:38:54,728 INFO [M:0;40c018648b21:45723 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T20:38:54,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45723-0x101455f29a10000, quorum=127.0.0.1:53778, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T20:38:54,733 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b3ba97d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:38:54,734 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e0b3b7c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:38:54,734 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:38:54,735 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@403020f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:38:54,735 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@fa9d1ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/hadoop.log.dir/,STOPPED} 2024-11-16T20:38:54,738 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T20:38:54,738 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:38:54,738 WARN [BP-1614523203-172.17.0.2-1731789530296 heartbeating to localhost/127.0.0.1:45225 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:38:54,738 WARN [BP-1614523203-172.17.0.2-1731789530296 heartbeating to localhost/127.0.0.1:45225 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1614523203-172.17.0.2-1731789530296 (Datanode Uuid e6e02dbc-328b-485a-a54c-8394e2fba484) service to localhost/127.0.0.1:45225 2024-11-16T20:38:54,739 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/cluster_05851c2a-d90d-4273-fcba-e46c749ce964/data/data3/current/BP-1614523203-172.17.0.2-1731789530296 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:38:54,740 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/cluster_05851c2a-d90d-4273-fcba-e46c749ce964/data/data4/current/BP-1614523203-172.17.0.2-1731789530296 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:38:54,740 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:38:54,742 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@392cf3b8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T20:38:54,742 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@56fc288a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:38:54,742 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:38:54,742 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ec1c28e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:38:54,742 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54008d53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/hadoop.log.dir/,STOPPED} 2024-11-16T20:38:54,743 WARN [BP-1614523203-172.17.0.2-1731789530296 heartbeating to localhost/127.0.0.1:45225 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T20:38:54,743 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T20:38:54,743 WARN [BP-1614523203-172.17.0.2-1731789530296 heartbeating to localhost/127.0.0.1:45225 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1614523203-172.17.0.2-1731789530296 (Datanode Uuid 344a6968-8040-4c53-800e-c7e091c9dc25) service to localhost/127.0.0.1:45225 2024-11-16T20:38:54,743 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T20:38:54,744 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/cluster_05851c2a-d90d-4273-fcba-e46c749ce964/data/data1/current/BP-1614523203-172.17.0.2-1731789530296 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:38:54,744 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/cluster_05851c2a-d90d-4273-fcba-e46c749ce964/data/data2/current/BP-1614523203-172.17.0.2-1731789530296 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T20:38:54,744 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T20:38:54,748 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a7b167c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T20:38:54,748 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@735ef7ff{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T20:38:54,748 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T20:38:54,748 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69a2ae1b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T20:38:54,748 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3910812a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9cd896fc-7a75-cea9-feb1-9a0c737521b6/hadoop.log.dir/,STOPPED} 2024-11-16T20:38:54,753 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T20:38:54,768 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T20:38:54,775 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=269 (was 231) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:45225 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/40c018648b21:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:45225 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:45225 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:45225 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45225 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45225 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45225 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45225 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=534 (was 519) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=197 (was 215), ProcessCount=11 (was 11), AvailableMemoryMB=4462 (was 4569)