2024-11-11 04:32:22,862 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-11 04:32:22,873 main DEBUG Took 0.009742 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-11 04:32:22,874 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-11 04:32:22,874 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-11 04:32:22,875 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-11 04:32:22,876 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:32:22,883 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-11 04:32:22,894 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:32:22,896 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:32:22,896 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:32:22,897 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:32:22,897 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:32:22,897 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:32:22,898 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:32:22,898 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:32:22,899 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:32:22,899 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:32:22,900 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:32:22,900 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:32:22,900 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:32:22,901 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-11 04:32:22,901 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:32:22,901 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:32:22,902 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:32:22,902 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:32:22,902 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:32:22,902 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:32:22,903 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:32:22,903 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:32:22,904 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:32:22,904 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:32:22,904 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:32:22,904 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-11 04:32:22,906 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:32:22,907 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-11 04:32:22,909 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-11 04:32:22,909 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-11 04:32:22,910 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-11 04:32:22,910 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-11 04:32:22,918 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-11 04:32:22,920 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-11 04:32:22,922 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-11 04:32:22,922 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-11 04:32:22,922 main DEBUG createAppenders(={Console}) 2024-11-11 04:32:22,923 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-11 04:32:22,923 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-11 04:32:22,924 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-11 04:32:22,924 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-11 04:32:22,924 main DEBUG OutputStream closed 2024-11-11 04:32:22,925 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-11 04:32:22,925 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-11 04:32:22,925 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-11 04:32:23,004 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-11 04:32:23,006 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-11 04:32:23,007 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-11 04:32:23,008 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-11 04:32:23,009 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-11 04:32:23,009 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-11 04:32:23,009 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-11 04:32:23,009 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-11 04:32:23,010 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-11 04:32:23,010 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-11 04:32:23,010 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-11 04:32:23,011 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-11 04:32:23,011 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-11 04:32:23,011 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-11 04:32:23,011 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-11 04:32:23,012 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-11 04:32:23,012 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-11 04:32:23,013 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-11 04:32:23,015 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-11 04:32:23,015 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-11 04:32:23,016 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-11 04:32:23,016 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-11T04:32:23,238 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00 2024-11-11 04:32:23,241 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-11 04:32:23,241 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
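The entries above show Log4j2 loading log4j2.properties from the tests jar and reporting the effective levels: org.apache.hadoop at WARN, org.apache.hadoop.hbase at DEBUG, org.apache.zookeeper at ERROR, with the root logger at INFO routed to the Console appender (HBaseTestAppender on SYSTEM_ERR, PatternLayout %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n). A minimal Log4j2 sketch for checking those levels programmatically; the class name is hypothetical and only the logger names are taken from the log:

```java
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class LogLevelProbe {
  public static void main(String[] args) {
    // With the configuration above, HBase packages should log at DEBUG...
    Logger hbase = LogManager.getLogger("org.apache.hadoop.hbase");
    // ...while ZooKeeper is clamped to ERROR and plain Hadoop to WARN.
    Logger zookeeper = LogManager.getLogger("org.apache.zookeeper");
    System.out.println("hbase debug enabled: " + hbase.isDebugEnabled());
    System.out.println("zookeeper info enabled: " + zookeeper.isInfoEnabled());
  }
}
```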
2024-11-11T04:32:23,251 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-11T04:32:23,284 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=4, ProcessCount=11, AvailableMemoryMB=7760 2024-11-11T04:32:23,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T04:32:23,301 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/cluster_014cb21e-57e7-8d88-1d85-bab754db7f65, deleteOnExit=true 2024-11-11T04:32:23,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T04:32:23,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/test.cache.data in system properties and HBase conf 2024-11-11T04:32:23,303 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T04:32:23,303 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/hadoop.log.dir in system properties and HBase conf 2024-11-11T04:32:23,304 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T04:32:23,304 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T04:32:23,305 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T04:32:23,390 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-11T04:32:23,477 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-11T04:32:23,481 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T04:32:23,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T04:32:23,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T04:32:23,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T04:32:23,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T04:32:23,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T04:32:23,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T04:32:23,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T04:32:23,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T04:32:23,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/nfs.dump.dir in system properties and HBase conf 2024-11-11T04:32:23,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/java.io.tmpdir in system properties and HBase conf 2024-11-11T04:32:23,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T04:32:23,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T04:32:23,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T04:32:24,004 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T04:32:24,360 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-11T04:32:24,457 INFO [Time-limited test {}] log.Log(170): Logging initialized @2258ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-11T04:32:24,547 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:32:24,614 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:32:24,635 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:32:24,636 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:32:24,637 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:32:24,651 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:32:24,654 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:32:24,655 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:32:24,856 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/java.io.tmpdir/jetty-localhost-40169-hadoop-hdfs-3_4_1-tests_jar-_-any-6025328357110033249/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T04:32:24,863 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:40169} 2024-11-11T04:32:24,864 INFO [Time-limited test {}] server.Server(415): Started @2666ms 2024-11-11T04:32:24,893 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T04:32:25,279 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:32:25,286 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:32:25,289 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:32:25,289 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:32:25,289 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T04:32:25,290 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:32:25,291 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:32:25,413 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/java.io.tmpdir/jetty-localhost-45005-hadoop-hdfs-3_4_1-tests_jar-_-any-1267795758402584514/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:32:25,414 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:45005} 2024-11-11T04:32:25,415 INFO [Time-limited test {}] server.Server(415): Started @3217ms 2024-11-11T04:32:25,473 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:32:25,604 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:32:25,611 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:32:25,613 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:32:25,613 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:32:25,613 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T04:32:25,614 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:32:25,616 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:32:25,747 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/java.io.tmpdir/jetty-localhost-40417-hadoop-hdfs-3_4_1-tests_jar-_-any-6081363664795301447/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:32:25,748 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:40417} 2024-11-11T04:32:25,748 INFO [Time-limited test {}] server.Server(415): Started @3550ms 2024-11-11T04:32:25,751 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
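Up to this point the log records HBaseClassTestRule applying a 13-minute timeout to TestLogRolling and HBaseTestingUtil starting a minicluster with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}, followed by the embedded HDFS NameNode and two DataNodes coming up on Jetty. A minimal sketch of how such a test class is typically wired, assuming standard JUnit 4 usage; this is an illustration, not the actual TestLogRolling source:

```java
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;

public class MiniClusterSketch {
  // Enforces the per-class timeout reported above ("timeout: 13 mins").
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(MiniClusterSketch.class);

  protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Mirrors the StartMiniClusterOption printed in the log above.
    UTIL.startMiniCluster(StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build());
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    UTIL.shutdownMiniCluster();
  }
}
```

Once startMiniCluster() returns, tests usually reach the cluster through UTIL.getConnection() rather than the raw master and region-server ports that appear later in the log.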
2024-11-11T04:32:25,952 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/cluster_014cb21e-57e7-8d88-1d85-bab754db7f65/data/data1/current/BP-1569374830-172.17.0.2-1731299544104/current, will proceed with Du for space computation calculation, 2024-11-11T04:32:25,952 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/cluster_014cb21e-57e7-8d88-1d85-bab754db7f65/data/data3/current/BP-1569374830-172.17.0.2-1731299544104/current, will proceed with Du for space computation calculation, 2024-11-11T04:32:25,952 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/cluster_014cb21e-57e7-8d88-1d85-bab754db7f65/data/data4/current/BP-1569374830-172.17.0.2-1731299544104/current, will proceed with Du for space computation calculation, 2024-11-11T04:32:25,954 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/cluster_014cb21e-57e7-8d88-1d85-bab754db7f65/data/data2/current/BP-1569374830-172.17.0.2-1731299544104/current, will proceed with Du for space computation calculation, 2024-11-11T04:32:26,015 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T04:32:26,016 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:32:26,086 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xafdb473330ac9525 with lease ID 0x5c37eacbe85a4656: Processing first storage report for DS-267b11f2-8888-4f53-9941-c7aec908c8b1 from datanode DatanodeRegistration(127.0.0.1:45519, datanodeUuid=e5096401-9f81-4bcc-b758-17a7abe469fb, infoPort=41903, infoSecurePort=0, ipcPort=40267, storageInfo=lv=-57;cid=testClusterID;nsid=566671522;c=1731299544104) 2024-11-11T04:32:26,087 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xafdb473330ac9525 with lease ID 0x5c37eacbe85a4656: from storage DS-267b11f2-8888-4f53-9941-c7aec908c8b1 node DatanodeRegistration(127.0.0.1:45519, datanodeUuid=e5096401-9f81-4bcc-b758-17a7abe469fb, infoPort=41903, infoSecurePort=0, ipcPort=40267, storageInfo=lv=-57;cid=testClusterID;nsid=566671522;c=1731299544104), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-11T04:32:26,087 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb8ef07b942411b24 with lease ID 0x5c37eacbe85a4657: Processing first storage report for DS-9dfaa11f-93fb-4718-9634-4c09b6b94647 from datanode DatanodeRegistration(127.0.0.1:43443, datanodeUuid=e9993b30-b5a1-43a2-b7d8-81e16e30c1f4, infoPort=33195, infoSecurePort=0, ipcPort=35921, storageInfo=lv=-57;cid=testClusterID;nsid=566671522;c=1731299544104) 2024-11-11T04:32:26,088 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb8ef07b942411b24 with lease ID 0x5c37eacbe85a4657: from storage DS-9dfaa11f-93fb-4718-9634-4c09b6b94647 node DatanodeRegistration(127.0.0.1:43443, datanodeUuid=e9993b30-b5a1-43a2-b7d8-81e16e30c1f4, infoPort=33195, infoSecurePort=0, ipcPort=35921, storageInfo=lv=-57;cid=testClusterID;nsid=566671522;c=1731299544104), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:32:26,088 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xafdb473330ac9525 with lease ID 0x5c37eacbe85a4656: Processing first storage report for DS-3944b459-a87f-4c39-9d5a-6a64a48961e3 from datanode DatanodeRegistration(127.0.0.1:45519, datanodeUuid=e5096401-9f81-4bcc-b758-17a7abe469fb, infoPort=41903, infoSecurePort=0, ipcPort=40267, storageInfo=lv=-57;cid=testClusterID;nsid=566671522;c=1731299544104) 2024-11-11T04:32:26,088 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xafdb473330ac9525 with lease ID 0x5c37eacbe85a4656: from storage DS-3944b459-a87f-4c39-9d5a-6a64a48961e3 node DatanodeRegistration(127.0.0.1:45519, datanodeUuid=e5096401-9f81-4bcc-b758-17a7abe469fb, infoPort=41903, infoSecurePort=0, ipcPort=40267, storageInfo=lv=-57;cid=testClusterID;nsid=566671522;c=1731299544104), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T04:32:26,088 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb8ef07b942411b24 with lease ID 0x5c37eacbe85a4657: Processing first storage report for DS-78e7b70b-23c4-4a29-af06-a594b477e277 from datanode DatanodeRegistration(127.0.0.1:43443, datanodeUuid=e9993b30-b5a1-43a2-b7d8-81e16e30c1f4, infoPort=33195, infoSecurePort=0, ipcPort=35921, storageInfo=lv=-57;cid=testClusterID;nsid=566671522;c=1731299544104) 2024-11-11T04:32:26,088 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xb8ef07b942411b24 with lease ID 0x5c37eacbe85a4657: from storage DS-78e7b70b-23c4-4a29-af06-a594b477e277 node DatanodeRegistration(127.0.0.1:43443, datanodeUuid=e9993b30-b5a1-43a2-b7d8-81e16e30c1f4, infoPort=33195, infoSecurePort=0, ipcPort=35921, storageInfo=lv=-57;cid=testClusterID;nsid=566671522;c=1731299544104), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:32:26,135 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00 2024-11-11T04:32:26,216 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/cluster_014cb21e-57e7-8d88-1d85-bab754db7f65/zookeeper_0, clientPort=64255, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/cluster_014cb21e-57e7-8d88-1d85-bab754db7f65/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/cluster_014cb21e-57e7-8d88-1d85-bab754db7f65/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T04:32:26,227 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64255 2024-11-11T04:32:26,241 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:32:26,245 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:32:26,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741825_1001 (size=7) 2024-11-11T04:32:26,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741825_1001 (size=7) 2024-11-11T04:32:26,898 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5 with version=8 2024-11-11T04:32:26,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/hbase-staging 2024-11-11T04:32:26,991 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-11T04:32:27,243 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a7bef91497aa:0 server-side Connection retries=45 2024-11-11T04:32:27,255 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:32:27,255 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:32:27,260 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:32:27,260 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:32:27,260 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:32:27,398 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T04:32:27,460 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-11T04:32:27,469 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-11T04:32:27,473 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:32:27,500 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 4255 (auto-detected) 2024-11-11T04:32:27,501 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-11T04:32:27,520 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46043 2024-11-11T04:32:27,540 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46043 connecting to ZooKeeper ensemble=127.0.0.1:64255 2024-11-11T04:32:27,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:460430x0, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:32:27,576 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46043-0x101959a09bc0000 connected 2024-11-11T04:32:27,605 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:32:27,609 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:32:27,624 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:32:27,629 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5, hbase.cluster.distributed=false 2024-11-11T04:32:27,653 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:32:27,659 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46043 
2024-11-11T04:32:27,660 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46043 2024-11-11T04:32:27,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46043 2024-11-11T04:32:27,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46043 2024-11-11T04:32:27,662 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46043 2024-11-11T04:32:27,778 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a7bef91497aa:0 server-side Connection retries=45 2024-11-11T04:32:27,780 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:32:27,781 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:32:27,781 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:32:27,781 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:32:27,781 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:32:27,784 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T04:32:27,786 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:32:27,787 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33113 2024-11-11T04:32:27,789 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33113 connecting to ZooKeeper ensemble=127.0.0.1:64255 2024-11-11T04:32:27,790 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:32:27,793 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:32:27,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:331130x0, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:32:27,800 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:331130x0, quorum=127.0.0.1:64255, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:32:27,800 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:33113-0x101959a09bc0001 connected 2024-11-11T04:32:27,806 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T04:32:27,817 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T04:32:27,820 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T04:32:27,825 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:32:27,826 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33113 2024-11-11T04:32:27,826 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33113 2024-11-11T04:32:27,827 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33113 2024-11-11T04:32:27,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33113 2024-11-11T04:32:27,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33113 2024-11-11T04:32:27,844 DEBUG [M:0;a7bef91497aa:46043 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a7bef91497aa:46043 2024-11-11T04:32:27,845 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a7bef91497aa,46043,1731299547044 2024-11-11T04:32:27,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:32:27,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:32:27,855 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a7bef91497aa,46043,1731299547044 2024-11-11T04:32:27,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T04:32:27,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:32:27,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:32:27,878 DEBUG 
[master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T04:32:27,879 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a7bef91497aa,46043,1731299547044 from backup master directory 2024-11-11T04:32:27,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a7bef91497aa,46043,1731299547044 2024-11-11T04:32:27,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:32:27,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:32:27,883 WARN [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T04:32:27,884 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a7bef91497aa,46043,1731299547044 2024-11-11T04:32:27,886 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-11T04:32:27,887 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-11T04:32:27,949 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/hbase.id] with ID: d2c8e528-0842-4dbe-b712-56be6fb899e6 2024-11-11T04:32:27,949 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/.tmp/hbase.id 2024-11-11T04:32:27,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741826_1002 (size=42) 2024-11-11T04:32:27,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741826_1002 (size=42) 2024-11-11T04:32:27,964 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/.tmp/hbase.id]:[hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/hbase.id] 2024-11-11T04:32:28,011 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:32:28,017 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-11T04:32:28,038 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 19ms. 2024-11-11T04:32:28,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:32:28,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:32:28,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741827_1003 (size=196) 2024-11-11T04:32:28,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741827_1003 (size=196) 2024-11-11T04:32:28,079 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T04:32:28,081 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T04:32:28,087 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:32:28,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741828_1004 (size=1189) 2024-11-11T04:32:28,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741828_1004 (size=1189) 2024-11-11T04:32:28,143 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store 2024-11-11T04:32:28,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:32:28,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:32:28,172 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-11T04:32:28,176 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:32:28,177 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T04:32:28,177 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:32:28,177 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:32:28,179 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T04:32:28,179 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:32:28,180 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
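The descriptor dumped above for the master local region ('master:store') lists four families: info (VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom filter, IN_MEMORY, 8 KB blocks) plus proc, rs and state with the stock settings (VERSIONS=1, ROW bloom, 64 KB blocks). A hedged sketch of an equivalent descriptor built with the public client API, for illustration only; MasterRegion constructs its descriptor internally, not via this hypothetical class:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        // 'info' family as printed in the log: 3 versions, ROW_INDEX_V1 encoding,
        // ROWCOL bloom, in-memory, 8 KB block size.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc', 'rs' and 'state' keep the default single-version, ROW-bloom, 64 KB settings.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
  }
}
```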
2024-11-11T04:32:28,181 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731299548177Disabling compacts and flushes for region at 1731299548177Disabling writes for close at 1731299548179 (+2 ms)Writing region close event to WAL at 1731299548180 (+1 ms)Closed at 1731299548180 2024-11-11T04:32:28,184 WARN [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/.initializing 2024-11-11T04:32:28,184 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/WALs/a7bef91497aa,46043,1731299547044 2024-11-11T04:32:28,208 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C46043%2C1731299547044, suffix=, logDir=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/WALs/a7bef91497aa,46043,1731299547044, archiveDir=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/oldWALs, maxLogs=10 2024-11-11T04:32:28,220 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C46043%2C1731299547044.1731299548215 2024-11-11T04:32:28,242 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/WALs/a7bef91497aa,46043,1731299547044/a7bef91497aa%2C46043%2C1731299547044.1731299548215 2024-11-11T04:32:28,256 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41903:41903),(127.0.0.1/127.0.0.1:33195:33195)] 2024-11-11T04:32:28,258 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:32:28,259 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:32:28,263 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:32:28,265 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:32:28,310 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:32:28,338 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T04:32:28,342 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:32:28,345 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:32:28,346 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:32:28,350 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T04:32:28,350 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:32:28,351 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:32:28,351 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:32:28,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T04:32:28,354 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:32:28,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:32:28,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:32:28,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T04:32:28,358 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:32:28,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:32:28,360 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:32:28,363 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:32:28,365 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:32:28,370 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:32:28,371 DEBUG 
[master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:32:28,376 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T04:32:28,380 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:32:28,386 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:32:28,387 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=753852, jitterRate=-0.04142886400222778}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T04:32:28,394 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731299548284Initializing all the Stores at 1731299548287 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299548287Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299548288 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299548289 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299548289Cleaning up temporary data from old regions at 1731299548371 (+82 ms)Region opened successfully at 1731299548394 (+23 ms) 2024-11-11T04:32:28,396 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T04:32:28,435 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7820257d, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7bef91497aa/172.17.0.2:0 2024-11-11T04:32:28,469 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T04:32:28,482 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T04:32:28,482 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T04:32:28,485 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T04:32:28,487 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-11T04:32:28,492 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-11T04:32:28,493 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T04:32:28,521 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T04:32:28,530 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T04:32:28,532 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T04:32:28,535 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T04:32:28,536 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T04:32:28,538 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T04:32:28,540 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T04:32:28,544 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T04:32:28,545 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T04:32:28,547 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-11-11T04:32:28,549 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T04:32:28,566 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T04:32:28,568 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T04:32:28,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:32:28,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:32:28,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:32:28,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:32:28,576 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a7bef91497aa,46043,1731299547044, sessionid=0x101959a09bc0000, setting cluster-up flag (Was=false) 2024-11-11T04:32:28,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:32:28,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:32:28,600 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T04:32:28,602 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7bef91497aa,46043,1731299547044 2024-11-11T04:32:28,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:32:28,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:32:28,615 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T04:32:28,617 DEBUG 
[master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7bef91497aa,46043,1731299547044 2024-11-11T04:32:28,624 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T04:32:28,633 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(746): ClusterId : d2c8e528-0842-4dbe-b712-56be6fb899e6 2024-11-11T04:32:28,637 DEBUG [RS:0;a7bef91497aa:33113 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T04:32:28,643 DEBUG [RS:0;a7bef91497aa:33113 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T04:32:28,644 DEBUG [RS:0;a7bef91497aa:33113 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T04:32:28,647 DEBUG [RS:0;a7bef91497aa:33113 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T04:32:28,648 DEBUG [RS:0;a7bef91497aa:33113 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1549573e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7bef91497aa/172.17.0.2:0 2024-11-11T04:32:28,665 DEBUG [RS:0;a7bef91497aa:33113 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a7bef91497aa:33113 2024-11-11T04:32:28,668 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T04:32:28,668 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T04:32:28,668 DEBUG [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T04:32:28,671 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(2659): reportForDuty to master=a7bef91497aa,46043,1731299547044 with port=33113, startcode=1731299547737 2024-11-11T04:32:28,683 DEBUG [RS:0;a7bef91497aa:33113 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T04:32:28,706 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T04:32:28,716 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T04:32:28,723 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
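[Editor's note] The balancer's next entry dumps a cluster snapshot (backup masters, live/dead region servers, average load). The same numbers can be fetched by a test or client through the Admin API; a small sketch using standard client classes, with the quorum address and client port taken from this log (values are otherwise illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterStatusSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // quorum reported in the log
    conf.setInt("hbase.zookeeper.property.clientPort", 64255); // client port reported in the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterMetrics m = admin.getClusterMetrics();
      System.out.println("master         = " + m.getMasterName());
      System.out.println("backup masters = " + m.getBackupMasterNames().size());
      System.out.println("live servers   = " + m.getLiveServerMetrics().size());
      System.out.println("dead servers   = " + m.getDeadServerNames().size());
    }
  }
}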
2024-11-11T04:32:28,729 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a7bef91497aa,46043,1731299547044 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T04:32:28,736 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:32:28,737 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:32:28,737 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:32:28,737 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:32:28,737 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a7bef91497aa:0, corePoolSize=10, maxPoolSize=10 2024-11-11T04:32:28,737 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:32:28,737 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a7bef91497aa:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:32:28,737 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:32:28,738 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731299578738 2024-11-11T04:32:28,740 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T04:32:28,741 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T04:32:28,743 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:32:28,744 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T04:32:28,744 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T04:32:28,745 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T04:32:28,745 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T04:32:28,746 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T04:32:28,747 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T04:32:28,751 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:32:28,751 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T04:32:28,752 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T04:32:28,754 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T04:32:28,754 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T04:32:28,757 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T04:32:28,761 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T04:32:28,763 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39253, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T04:32:28,764 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large 
file=Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299548763,5,FailOnTimeoutGroup] 2024-11-11T04:32:28,765 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299548764,5,FailOnTimeoutGroup] 2024-11-11T04:32:28,765 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T04:32:28,765 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T04:32:28,766 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T04:32:28,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741831_1007 (size=1321) 2024-11-11T04:32:28,767 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T04:32:28,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741831_1007 (size=1321) 2024-11-11T04:32:28,771 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T04:32:28,771 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46043 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a7bef91497aa,33113,1731299547737 2024-11-11T04:32:28,772 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5 2024-11-11T04:32:28,774 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46043 {}] master.ServerManager(517): Registering regionserver=a7bef91497aa,33113,1731299547737 2024-11-11T04:32:28,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:32:28,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:32:28,792 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:32:28,792 DEBUG [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5 2024-11-11T04:32:28,792 DEBUG [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33227 2024-11-11T04:32:28,792 DEBUG [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T04:32:28,795 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:32:28,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:32:28,798 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:32:28,798 DEBUG [RS:0;a7bef91497aa:33113 {}] zookeeper.ZKUtil(111): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a7bef91497aa,33113,1731299547737 2024-11-11T04:32:28,798 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:32:28,798 WARN [RS:0;a7bef91497aa:33113 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
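[Editor's note] The RegionServerTracker entry above reacts to the ephemeral znode the region server just created under /hbase/rs. That registration can also be inspected from outside HBase with a plain ZooKeeper client; a sketch against the quorum and baseZNode reported in this log (the latch is just the usual wait-for-SyncConnected idiom):

import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher.Event.KeeperState;
import org.apache.zookeeper.ZooKeeper;

public class RsZnodeSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Quorum and baseZNode taken from the log: 127.0.0.1:64255 and /hbase.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64255", 30000, event -> {
      if (event.getState() == KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    // Each live region server registers an ephemeral child under /hbase/rs,
    // e.g. a7bef91497aa,33113,1731299547737 in this run.
    List<String> servers = zk.getChildren("/hbase/rs", false);
    servers.forEach(System.out::println);
    zk.close();
  }
}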
2024-11-11T04:32:28,798 INFO [RS:0;a7bef91497aa:33113 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:32:28,798 DEBUG [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737 2024-11-11T04:32:28,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:32:28,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T04:32:28,800 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a7bef91497aa,33113,1731299547737] 2024-11-11T04:32:28,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T04:32:28,803 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:32:28,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:32:28,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:32:28,807 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:32:28,807 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:32:28,808 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:32:28,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T04:32:28,811 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:32:28,811 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:32:28,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:32:28,813 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T04:32:28,814 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740 2024-11-11T04:32:28,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740 2024-11-11T04:32:28,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T04:32:28,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T04:32:28,819 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
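[Editor's note] The FlushLargeStoresPolicy entry above spells out its fallback: with no hbase.hregion.percolumnfamilyflush.size.lower.bound set on the table, it divides the region's memstore flush size by the number of column families. That matches the numbers in this log: 128 MB / 4 families = 32 MB for master:store (flushSizeLowerBound=33554432), and the 16 MB logged here for the four-family hbase:meta implies a 64 MB flush size (flushSizeLowerBound=16777216). A small sketch of that arithmetic; the explicit override at the end uses the key quoted in the log message on a hypothetical table, with an illustrative 16 MB value:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundSketch {
  // Fallback used when the lower bound is not set on the table:
  // memstore flush size / number of column families.
  static long fallbackLowerBound(long memstoreFlushSize, int numFamilies) {
    return memstoreFlushSize / numFamilies;
  }

  public static void main(String[] args) {
    System.out.println(fallbackLowerBound(128L << 20, 4)); // 33554432, as logged for master:store
    System.out.println(fallbackLowerBound(64L << 20, 4));  // 16777216, as logged for hbase:meta

    // Setting the bound explicitly on a (hypothetical) table descriptor instead.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", String.valueOf(16L << 20))
        .build();
    System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
  }
}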
2024-11-11T04:32:28,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T04:32:28,825 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:32:28,826 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=798915, jitterRate=0.015873149037361145}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T04:32:28,827 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T04:32:28,830 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731299548792Initializing all the Stores at 1731299548794 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299548794Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299548794Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299548794Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299548795 (+1 ms)Cleaning up temporary data from old regions at 1731299548818 (+23 ms)Region opened successfully at 1731299548830 (+12 ms) 2024-11-11T04:32:28,830 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:32:28,830 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T04:32:28,830 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T04:32:28,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:32:28,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:32:28,832 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T04:32:28,832 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 
1588230740: Waiting for close lock at 1731299548830Disabling compacts and flushes for region at 1731299548830Disabling writes for close at 1731299548831 (+1 ms)Writing region close event to WAL at 1731299548832 (+1 ms)Closed at 1731299548832 2024-11-11T04:32:28,836 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:32:28,836 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T04:32:28,841 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T04:32:28,843 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T04:32:28,846 INFO [RS:0;a7bef91497aa:33113 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T04:32:28,846 INFO [RS:0;a7bef91497aa:33113 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:32:28,847 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T04:32:28,852 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T04:32:28,853 INFO [RS:0;a7bef91497aa:33113 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T04:32:28,855 INFO [RS:0;a7bef91497aa:33113 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
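[Editor's note] The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. Those figures are heap-derived: limit = heap x global memstore fraction, low mark = limit x lower-limit fraction, and 836/880 is exactly 0.95. A sketch of that arithmetic, assuming the usual default fractions of 0.4 and 0.95 (which would imply a roughly 2.2 GB test heap); the property name at the end is the commonly documented one and should be verified against the HBase version in use:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStorePressureSketch {
  public static void main(String[] args) {
    long heapBytes = 2200L << 20;     // assumed heap, back-computed from 880 M / 0.4
    double globalFraction = 0.4;      // assumed default fraction of heap for all memstores
    double lowerLimitFraction = 0.95; // assumed default low-water fraction of the limit
    long limit = (long) (heapBytes * globalFraction);
    long lowMark = (long) (limit * lowerLimitFraction);
    // Reproduces the logged 880 M / 836 M under the assumptions above.
    System.out.printf("globalMemStoreLimit=%d MB, lowMark=%d MB%n", limit >> 20, lowMark >> 20);

    // Assumed property name; check your version's documentation before relying on it.
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
  }
}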
2024-11-11T04:32:28,855 DEBUG [RS:0;a7bef91497aa:33113 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:32:28,855 DEBUG [RS:0;a7bef91497aa:33113 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:32:28,856 DEBUG [RS:0;a7bef91497aa:33113 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:32:28,856 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T04:32:28,856 DEBUG [RS:0;a7bef91497aa:33113 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:32:28,856 DEBUG [RS:0;a7bef91497aa:33113 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:32:28,856 DEBUG [RS:0;a7bef91497aa:33113 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a7bef91497aa:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:32:28,856 DEBUG [RS:0;a7bef91497aa:33113 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:32:28,856 DEBUG [RS:0;a7bef91497aa:33113 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:32:28,856 DEBUG [RS:0;a7bef91497aa:33113 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:32:28,857 DEBUG [RS:0;a7bef91497aa:33113 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:32:28,857 DEBUG [RS:0;a7bef91497aa:33113 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:32:28,857 DEBUG [RS:0;a7bef91497aa:33113 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:32:28,857 DEBUG [RS:0;a7bef91497aa:33113 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a7bef91497aa:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:32:28,857 DEBUG [RS:0;a7bef91497aa:33113 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:32:28,860 INFO [RS:0;a7bef91497aa:33113 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:32:28,860 INFO [RS:0;a7bef91497aa:33113 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
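[Editor's note] The RS_* entries above are named executor pools described only by corePoolSize/maxPoolSize. Purely to illustrate what those two parameters mean (this is not HBase's internal ExecutorService class), a plain java.util.concurrent equivalent of a 1/1 pool such as RS_OPEN_REGION might look like:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ExecutorPoolSketch {
  public static void main(String[] args) {
    // Single-threaded pool that queues work items, analogous to corePoolSize=1, maxPoolSize=1.
    ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
        1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    openRegionPool.allowCoreThreadTimeOut(true); // the log notes the same option on RemoteProcedureDispatcher
    openRegionPool.execute(() -> System.out.println("open region task"));
    openRegionPool.shutdown();
  }
}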
2024-11-11T04:32:28,860 INFO [RS:0;a7bef91497aa:33113 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:32:28,861 INFO [RS:0;a7bef91497aa:33113 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T04:32:28,861 INFO [RS:0;a7bef91497aa:33113 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T04:32:28,861 INFO [RS:0;a7bef91497aa:33113 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,33113,1731299547737-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:32:28,882 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T04:32:28,884 INFO [RS:0;a7bef91497aa:33113 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,33113,1731299547737-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:32:28,885 INFO [RS:0;a7bef91497aa:33113 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:32:28,885 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.Replication(171): a7bef91497aa,33113,1731299547737 started 2024-11-11T04:32:28,904 INFO [RS:0;a7bef91497aa:33113 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:32:28,905 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(1482): Serving as a7bef91497aa,33113,1731299547737, RpcServer on a7bef91497aa/172.17.0.2:33113, sessionid=0x101959a09bc0001 2024-11-11T04:32:28,905 DEBUG [RS:0;a7bef91497aa:33113 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T04:32:28,906 DEBUG [RS:0;a7bef91497aa:33113 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a7bef91497aa,33113,1731299547737 2024-11-11T04:32:28,906 DEBUG [RS:0;a7bef91497aa:33113 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7bef91497aa,33113,1731299547737' 2024-11-11T04:32:28,906 DEBUG [RS:0;a7bef91497aa:33113 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T04:32:28,907 DEBUG [RS:0;a7bef91497aa:33113 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T04:32:28,908 DEBUG [RS:0;a7bef91497aa:33113 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T04:32:28,908 DEBUG [RS:0;a7bef91497aa:33113 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T04:32:28,908 DEBUG [RS:0;a7bef91497aa:33113 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a7bef91497aa,33113,1731299547737 2024-11-11T04:32:28,908 DEBUG [RS:0;a7bef91497aa:33113 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7bef91497aa,33113,1731299547737' 2024-11-11T04:32:28,908 DEBUG [RS:0;a7bef91497aa:33113 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T04:32:28,909 DEBUG [RS:0;a7bef91497aa:33113 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T04:32:28,910 DEBUG 
[RS:0;a7bef91497aa:33113 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T04:32:28,910 INFO [RS:0;a7bef91497aa:33113 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T04:32:28,910 INFO [RS:0;a7bef91497aa:33113 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T04:32:29,007 WARN [a7bef91497aa:46043 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-11T04:32:29,019 INFO [RS:0;a7bef91497aa:33113 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C33113%2C1731299547737, suffix=, logDir=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737, archiveDir=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/oldWALs, maxLogs=32 2024-11-11T04:32:29,022 INFO [RS:0;a7bef91497aa:33113 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C33113%2C1731299547737.1731299549022 2024-11-11T04:32:29,031 INFO [RS:0;a7bef91497aa:33113 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299549022 2024-11-11T04:32:29,032 DEBUG [RS:0;a7bef91497aa:33113 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41903:41903),(127.0.0.1/127.0.0.1:33195:33195)] 2024-11-11T04:32:29,259 DEBUG [a7bef91497aa:46043 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T04:32:29,271 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a7bef91497aa,33113,1731299547737 2024-11-11T04:32:29,278 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7bef91497aa,33113,1731299547737, state=OPENING 2024-11-11T04:32:29,284 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T04:32:29,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:32:29,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:32:29,287 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:32:29,287 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:32:29,289 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T04:32:29,290 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=a7bef91497aa,33113,1731299547737}] 2024-11-11T04:32:29,467 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T04:32:29,471 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38151, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T04:32:29,482 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T04:32:29,483 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:32:29,486 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C33113%2C1731299547737.meta, suffix=.meta, logDir=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737, archiveDir=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/oldWALs, maxLogs=32 2024-11-11T04:32:29,488 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C33113%2C1731299547737.meta.1731299549488.meta 2024-11-11T04:32:29,497 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.meta.1731299549488.meta 2024-11-11T04:32:29,500 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33195:33195),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-11T04:32:29,504 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:32:29,506 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T04:32:29,509 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T04:32:29,514 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
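
The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" lines above come from the FSHLog-based WAL provider as it creates the region server and meta WALs. A hedged sketch of the configuration knobs that feed those numbers follows; the property names are the standard ones as I understand them, and defaults and exact semantics can differ between HBase versions. The roll size is the block size times the roll multiplier, and maxlogs caps how many un-archived WAL files a region server keeps.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // WAL block size; when unset HBase derives it from the underlying HDFS block size.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);

    // Roll size = blocksize * multiplier, i.e. 256 MB * 0.5 = 128 MB as in the log.
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);

    // Upper bound on un-archived WAL files before forced flushes and rolls.
    conf.setInt("hbase.regionserver.maxlogs", 32);

    long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", -1);
    float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    System.out.println("roll size = " + (long) (blockSize * multiplier) + " bytes");
  }
}
```
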
2024-11-11T04:32:29,519 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T04:32:29,519 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:32:29,520 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T04:32:29,520 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T04:32:29,523 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:32:29,525 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:32:29,525 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:32:29,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:32:29,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T04:32:29,528 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T04:32:29,528 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:32:29,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:32:29,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:32:29,530 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:32:29,530 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:32:29,531 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:32:29,531 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T04:32:29,533 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:32:29,533 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:32:29,533 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
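
The CompactionConfiguration lines above print the effective compaction tuning for each column family of hbase:meta (min/max compact size, files per compaction, selection ratio). A hedged sketch of the most commonly tuned properties behind those values; the property names are the standard ones, but the values here are illustrative rather than copied from this cluster.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower and upper bounds on the number of files picked into one minor compaction
    // ("minFilesToCompact:3, maxFilesToCompact:10" in the log).
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);

    // Files smaller than min.size are always eligible; files larger than max.size never are.
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.max.size", Long.MAX_VALUE);

    // Selection ratio used by the exploring compaction policy ("ratio 1.200000" above).
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);

    System.out.println("compaction min files = "
        + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
```
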
2024-11-11T04:32:29,534 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T04:32:29,535 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740 2024-11-11T04:32:29,538 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740 2024-11-11T04:32:29,540 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T04:32:29,540 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T04:32:29,541 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T04:32:29,544 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T04:32:29,545 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=804380, jitterRate=0.022822439670562744}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T04:32:29,545 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T04:32:29,547 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731299549520Writing region info on filesystem at 1731299549521 (+1 ms)Initializing all the Stores at 1731299549523 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299549523Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299549523Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299549523Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299549523Cleaning up temporary data from old regions at 1731299549540 (+17 ms)Running coprocessor post-open hooks at 1731299549546 (+6 ms)Region opened successfully at 1731299549546 2024-11-11T04:32:29,554 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731299549458 2024-11-11T04:32:29,568 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T04:32:29,569 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T04:32:29,569 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a7bef91497aa,33113,1731299547737 2024-11-11T04:32:29,571 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7bef91497aa,33113,1731299547737, state=OPEN 2024-11-11T04:32:29,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:32:29,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:32:29,577 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:32:29,577 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:32:29,578 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a7bef91497aa,33113,1731299547737 2024-11-11T04:32:29,585 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T04:32:29,585 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a7bef91497aa,33113,1731299547737 in 288 msec 2024-11-11T04:32:29,592 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T04:32:29,592 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 744 msec 2024-11-11T04:32:29,594 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:32:29,594 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T04:32:29,616 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T04:32:29,617 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7bef91497aa,33113,1731299547737, seqNum=-1] 2024-11-11T04:32:29,639 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:32:29,641 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35377, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:32:29,668 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0100 sec 2024-11-11T04:32:29,668 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731299549668, completionTime=-1 2024-11-11T04:32:29,671 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T04:32:29,671 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T04:32:29,704 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T04:32:29,704 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731299609704 2024-11-11T04:32:29,704 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731299669704 2024-11-11T04:32:29,704 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 32 msec 2024-11-11T04:32:29,707 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,46043,1731299547044-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:32:29,707 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,46043,1731299547044-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:32:29,707 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,46043,1731299547044-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:32:29,709 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a7bef91497aa:46043, period=300000, unit=MILLISECONDS is enabled. 
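
InitMetaProcedure above creates the built-in 'default' and 'hbase' namespaces as its last step. A small hedged sketch of inspecting and creating namespaces from the client side; the ZooKeeper quorum address and the 'test_ns' name are placeholders, not values from this cluster.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:2181"); // placeholder quorum

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A freshly initialized cluster reports the two namespaces created above.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println("namespace: " + ns.getName());
      }
      // User namespaces are created the same way.
      admin.createNamespace(NamespaceDescriptor.create("test_ns").build());
    }
  }
}
```
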
2024-11-11T04:32:29,709 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T04:32:29,710 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T04:32:29,717 DEBUG [master/a7bef91497aa:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T04:32:29,742 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.858sec 2024-11-11T04:32:29,744 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T04:32:29,768 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T04:32:29,769 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T04:32:29,770 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T04:32:29,770 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T04:32:29,771 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,46043,1731299547044-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:32:29,771 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,46043,1731299547044-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T04:32:29,781 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T04:32:29,782 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T04:32:29,783 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,46043,1731299547044-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
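
With the master reporting "completed initialization" above, a client can confirm the cluster shape through the Admin API. A hedged sketch (the quorum address is a placeholder): ClusterMetrics exposes the active master and the live region servers that the preceding log entries have been describing.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterStatusSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:2181"); // placeholder quorum

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println("active master : " + metrics.getMasterName());
      System.out.println("live servers  : " + metrics.getLiveServerMetrics().size());
    }
  }
}
```
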
2024-11-11T04:32:29,844 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4731d90b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:32:29,848 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-11T04:32:29,848 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-11T04:32:29,853 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a7bef91497aa,46043,-1 for getting cluster id 2024-11-11T04:32:29,857 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T04:32:29,867 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd2c8e528-0842-4dbe-b712-56be6fb899e6' 2024-11-11T04:32:29,870 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T04:32:29,870 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d2c8e528-0842-4dbe-b712-56be6fb899e6" 2024-11-11T04:32:29,870 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6938075e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:32:29,870 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a7bef91497aa,46043,-1] 2024-11-11T04:32:29,873 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T04:32:29,875 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:32:29,877 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49628, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T04:32:29,880 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2eb3700d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:32:29,881 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T04:32:29,890 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7bef91497aa,33113,1731299547737, seqNum=-1] 2024-11-11T04:32:29,891 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:32:29,894 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37014, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:32:29,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=a7bef91497aa,46043,1731299547044 2024-11-11T04:32:29,916 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:32:29,924 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T04:32:29,928 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T04:32:29,933 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is a7bef91497aa,46043,1731299547044 2024-11-11T04:32:29,936 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7fce75f8 2024-11-11T04:32:29,937 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T04:32:29,940 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49638, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T04:32:29,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46043 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T04:32:29,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46043 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
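
The two TableDescriptorChecker warnings above fire because the descriptor sent by the test sets a tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192), presumably to force splits and flushes quickly; the log also shows the balancer being switched off first. A hedged reconstruction of what such a client call can look like; the connection setup is a placeholder and this is not the test's actual source.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSmallTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:2181"); // placeholder quorum

    TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
        // Deliberately tiny limits: these are the values that trip the warnings above.
        // The minicluster presumably relaxes table sanity checks, so they only log WARNs.
        .setMaxFileSize(786432L)
        .setMemStoreFlushSize(8192L)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)
            .build())
        .build();

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.balancerSwitch(false, true); // keep the balancer from moving test regions
      admin.createTable(desc);
    }
  }
}
```
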
2024-11-11T04:32:29,946 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46043 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T04:32:29,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46043 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-11T04:32:29,957 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T04:32:29,959 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46043 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-11T04:32:29,960 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:32:29,962 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T04:32:29,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T04:32:30,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741835_1011 (size=389) 2024-11-11T04:32:30,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741835_1011 (size=389) 2024-11-11T04:32:30,012 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6148582bdb35bae8083a82599a576ebb, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5 2024-11-11T04:32:30,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741836_1012 (size=72) 2024-11-11T04:32:30,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741836_1012 (size=72) 2024-11-11T04:32:30,024 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:32:30,024 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 6148582bdb35bae8083a82599a576ebb, disabling compactions & flushes 2024-11-11T04:32:30,024 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb. 2024-11-11T04:32:30,024 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb. 2024-11-11T04:32:30,024 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb. after waiting 0 ms 2024-11-11T04:32:30,024 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb. 2024-11-11T04:32:30,024 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb. 2024-11-11T04:32:30,024 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6148582bdb35bae8083a82599a576ebb: Waiting for close lock at 1731299550024Disabling compacts and flushes for region at 1731299550024Disabling writes for close at 1731299550024Writing region close event to WAL at 1731299550024Closed at 1731299550024 2024-11-11T04:32:30,027 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T04:32:30,032 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731299550027"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731299550027"}]},"ts":"1731299550027"} 2024-11-11T04:32:30,037 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
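
The Put shown above writes the new region's info:regioninfo and info:state cells into hbase:meta. A hedged sketch of reading that catalog row back with a plain Get; the row key is the one from the log, and printing raw cells avoids depending on any particular RegionInfo parsing helper.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaRowSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:2181"); // placeholder quorum

    // Meta row key format: <table>,<start key>,<region id>.<encoded region name>.
    byte[] row = Bytes.toBytes(
        "TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb.");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      Result r = meta.get(new Get(row).addFamily(Bytes.toBytes("info")));
      for (Cell cell : r.rawCells()) {
        System.out.println("info:" + Bytes.toString(CellUtil.cloneQualifier(cell))
            + " @ " + cell.getTimestamp());
      }
    }
  }
}
```
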
2024-11-11T04:32:30,039 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T04:32:30,042 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731299550039"}]},"ts":"1731299550039"} 2024-11-11T04:32:30,048 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-11T04:32:30,050 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6148582bdb35bae8083a82599a576ebb, ASSIGN}] 2024-11-11T04:32:30,053 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6148582bdb35bae8083a82599a576ebb, ASSIGN 2024-11-11T04:32:30,055 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6148582bdb35bae8083a82599a576ebb, ASSIGN; state=OFFLINE, location=a7bef91497aa,33113,1731299547737; forceNewPlan=false, retain=false 2024-11-11T04:32:30,206 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6148582bdb35bae8083a82599a576ebb, regionState=OPENING, regionLocation=a7bef91497aa,33113,1731299547737 2024-11-11T04:32:30,211 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6148582bdb35bae8083a82599a576ebb, ASSIGN because future has completed 2024-11-11T04:32:30,212 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6148582bdb35bae8083a82599a576ebb, server=a7bef91497aa,33113,1731299547737}] 2024-11-11T04:32:30,374 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb. 
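
The ASSIGN subprocedure above is what eventually brings the new region online on a7bef91497aa,33113. From the client side, the usual way to wait for that to complete is to poll table availability; a minimal hedged sketch with simplified timeout handling follows.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WaitForAssignSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:2181"); // placeholder quorum
    TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      long deadline = System.currentTimeMillis() + 30_000;
      // isTableAvailable is true once every region of the table is open somewhere.
      while (!admin.isTableAvailable(name)) {
        if (System.currentTimeMillis() > deadline) {
          throw new IllegalStateException("table did not come online in time");
        }
        Thread.sleep(200);
      }
      System.out.println("all regions of " + name + " are assigned and open");
    }
  }
}
```
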
2024-11-11T04:32:30,374 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 6148582bdb35bae8083a82599a576ebb, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:32:30,375 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 6148582bdb35bae8083a82599a576ebb 2024-11-11T04:32:30,375 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:32:30,375 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 6148582bdb35bae8083a82599a576ebb 2024-11-11T04:32:30,375 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 6148582bdb35bae8083a82599a576ebb 2024-11-11T04:32:30,377 INFO [StoreOpener-6148582bdb35bae8083a82599a576ebb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6148582bdb35bae8083a82599a576ebb 2024-11-11T04:32:30,380 INFO [StoreOpener-6148582bdb35bae8083a82599a576ebb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6148582bdb35bae8083a82599a576ebb columnFamilyName info 2024-11-11T04:32:30,381 DEBUG [StoreOpener-6148582bdb35bae8083a82599a576ebb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:32:30,382 INFO [StoreOpener-6148582bdb35bae8083a82599a576ebb-1 {}] regionserver.HStore(327): Store=6148582bdb35bae8083a82599a576ebb/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:32:30,382 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 6148582bdb35bae8083a82599a576ebb 2024-11-11T04:32:30,384 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb 2024-11-11T04:32:30,384 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb 2024-11-11T04:32:30,385 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 6148582bdb35bae8083a82599a576ebb 2024-11-11T04:32:30,385 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 6148582bdb35bae8083a82599a576ebb 2024-11-11T04:32:30,388 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 6148582bdb35bae8083a82599a576ebb 2024-11-11T04:32:30,391 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:32:30,392 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 6148582bdb35bae8083a82599a576ebb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=714596, jitterRate=-0.09134519100189209}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T04:32:30,392 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6148582bdb35bae8083a82599a576ebb 2024-11-11T04:32:30,393 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 6148582bdb35bae8083a82599a576ebb: Running coprocessor pre-open hook at 1731299550375Writing region info on filesystem at 1731299550375Initializing all the Stores at 1731299550377 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299550377Cleaning up temporary data from old regions at 1731299550385 (+8 ms)Running coprocessor post-open hooks at 1731299550392 (+7 ms)Region opened successfully at 1731299550393 (+1 ms) 2024-11-11T04:32:30,395 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb., pid=6, masterSystemTime=1731299550367 2024-11-11T04:32:30,399 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb. 2024-11-11T04:32:30,399 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb. 2024-11-11T04:32:30,400 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6148582bdb35bae8083a82599a576ebb, regionState=OPEN, openSeqNum=2, regionLocation=a7bef91497aa,33113,1731299547737 2024-11-11T04:32:30,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6148582bdb35bae8083a82599a576ebb, server=a7bef91497aa,33113,1731299547737 because future has completed 2024-11-11T04:32:30,410 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T04:32:30,411 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 6148582bdb35bae8083a82599a576ebb, server=a7bef91497aa,33113,1731299547737 in 194 msec 2024-11-11T04:32:30,415 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T04:32:30,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6148582bdb35bae8083a82599a576ebb, ASSIGN in 361 msec 2024-11-11T04:32:30,417 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T04:32:30,417 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731299550417"}]},"ts":"1731299550417"} 2024-11-11T04:32:30,421 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-11T04:32:30,423 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T04:32:30,426 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 474 msec 2024-11-11T04:32:35,024 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-11T04:32:35,070 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T04:32:35,071 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-11T04:32:37,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T04:32:37,458 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-11T04:32:37,459 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-11T04:32:37,459 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-11T04:32:37,460 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T04:32:37,461 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-11T04:32:37,461 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-11T04:32:37,461 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-11T04:32:40,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T04:32:40,046 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-11T04:32:40,049 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-11T04:32:40,055 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-11T04:32:40,056 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb. 
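
The "Found 1 regions for table" check above is essentially a region-location lookup against hbase:meta. A hedged sketch of the same check through the public RegionLocator API (quorum address is a placeholder):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:2181"); // placeholder quorum
    TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(name)) {
      // For a fresh single-region table this prints exactly one location,
      // matching the "Found 1 regions" line above.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " on " + loc.getServerName());
      }
    }
  }
}
```
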
2024-11-11T04:32:40,057 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C33113%2C1731299547737.1731299560057 2024-11-11T04:32:40,066 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:32:40,066 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:32:40,066 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:32:40,067 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:32:40,067 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:32:40,067 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299549022 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299560057 2024-11-11T04:32:40,068 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33195:33195),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-11T04:32:40,069 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299549022 is not closed yet, will try archiving it next time 2024-11-11T04:32:40,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741833_1009 (size=451) 2024-11-11T04:32:40,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741833_1009 (size=451) 2024-11-11T04:32:40,072 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299549022 to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/oldWALs/a7bef91497aa%2C33113%2C1731299547737.1731299549022 2024-11-11T04:32:40,077 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb., hostname=a7bef91497aa,33113,1731299547737, seqNum=2] 2024-11-11T04:32:52,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33113 {}] regionserver.HRegion(8855): Flush requested on 6148582bdb35bae8083a82599a576ebb 2024-11-11T04:32:52,112 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6148582bdb35bae8083a82599a576ebb 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T04:32:52,187 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp/info/1917642b9d7e43a1bae2b026c993711e is 1080, key is row0001/info:/1731299560080/Put/seqid=0 2024-11-11T04:32:52,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741838_1014 (size=12509) 2024-11-11T04:32:52,201 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741838_1014 (size=12509) 2024-11-11T04:32:52,202 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp/info/1917642b9d7e43a1bae2b026c993711e 2024-11-11T04:32:52,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp/info/1917642b9d7e43a1bae2b026c993711e as hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/1917642b9d7e43a1bae2b026c993711e 2024-11-11T04:32:52,262 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/1917642b9d7e43a1bae2b026c993711e, entries=7, sequenceid=11, filesize=12.2 K 2024-11-11T04:32:52,271 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6148582bdb35bae8083a82599a576ebb in 157ms, sequenceid=11, compaction requested=false 2024-11-11T04:32:52,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6148582bdb35bae8083a82599a576ebb: 2024-11-11T04:32:56,131 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-11T04:33:00,121 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C33113%2C1731299547737.1731299580121 2024-11-11T04:33:00,330 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK], DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK]] 2024-11-11T04:33:00,330 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:00,330 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:00,330 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:00,331 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:00,331 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:00,331 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299560057 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299580121 2024-11-11T04:33:00,332 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41903:41903),(127.0.0.1/127.0.0.1:33195:33195)] 2024-11-11T04:33:00,332 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299560057 is not closed yet, will try archiving it next time 2024-11-11T04:33:00,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741837_1013 (size=12399) 2024-11-11T04:33:00,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741837_1013 (size=12399) 2024-11-11T04:33:00,535 INFO [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:02,739 INFO [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:04,943 INFO [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:07,147 INFO [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:07,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33113 {}] regionserver.HRegion(8855): Flush requested on 6148582bdb35bae8083a82599a576ebb 2024-11-11T04:33:07,148 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6148582bdb35bae8083a82599a576ebb 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T04:33:07,350 INFO [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:07,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp/info/128a0bcf75e345da822c3f20303b7b6b is 1080, key is row0008/info:/1731299574111/Put/seqid=0 2024-11-11T04:33:07,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741840_1016 (size=12509) 2024-11-11T04:33:07,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741840_1016 (size=12509) 2024-11-11T04:33:07,364 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp/info/128a0bcf75e345da822c3f20303b7b6b 2024-11-11T04:33:07,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp/info/128a0bcf75e345da822c3f20303b7b6b as hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/128a0bcf75e345da822c3f20303b7b6b 2024-11-11T04:33:07,384 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/128a0bcf75e345da822c3f20303b7b6b, entries=7, sequenceid=21, filesize=12.2 K 2024-11-11T04:33:07,586 INFO [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:07,586 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6148582bdb35bae8083a82599a576ebb in 
437ms, sequenceid=21, compaction requested=false 2024-11-11T04:33:07,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6148582bdb35bae8083a82599a576ebb: 2024-11-11T04:33:07,586 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-11T04:33:07,586 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:33:07,587 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/1917642b9d7e43a1bae2b026c993711e because midkey is the same as first or last row 2024-11-11T04:33:09,351 INFO [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:09,798 INFO [master/a7bef91497aa:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-11T04:33:09,798 INFO [master/a7bef91497aa:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-11T04:33:11,555 INFO [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:11,557 WARN [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:11,558 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7bef91497aa%2C33113%2C1731299547737:(num 1731299580121) roll requested 2024-11-11T04:33:11,559 INFO [regionserver/a7bef91497aa:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C33113%2C1731299547737.1731299591559 2024-11-11T04:33:11,766 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:11,767 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:11,767 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:11,767 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:11,767 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:11,767 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-11T04:33:11,768 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299580121 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299591559 2024-11-11T04:33:11,768 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41903:41903),(127.0.0.1/127.0.0.1:33195:33195)] 2024-11-11T04:33:11,768 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299580121 is not closed yet, will try archiving it next time 2024-11-11T04:33:11,769 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299560057 to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/oldWALs/a7bef91497aa%2C33113%2C1731299547737.1731299560057 2024-11-11T04:33:11,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741839_1015 (size=7739) 2024-11-11T04:33:11,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741839_1015 (size=7739) 2024-11-11T04:33:13,759 INFO [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:15,375 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6148582bdb35bae8083a82599a576ebb, had cached 0 bytes from a total of 25018 2024-11-11T04:33:15,963 INFO [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:18,167 INFO [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:20,371 INFO [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], 
DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:22,373 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T04:33:22,373 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C33113%2C1731299547737.1731299602373 2024-11-11T04:33:26,132 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T04:33:27,382 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:27,383 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:27,383 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7bef91497aa%2C33113%2C1731299547737:(num 1731299602373) roll requested 2024-11-11T04:33:27,384 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:27,384 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:27,384 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:27,384 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:27,384 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:27,384 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299591559 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299602373 2024-11-11T04:33:27,387 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33195:33195),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-11T04:33:27,387 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299591559 is not closed yet, will try archiving it next time 2024-11-11T04:33:27,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741841_1017 (size=4753) 2024-11-11T04:33:27,387 INFO [regionserver/a7bef91497aa:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C33113%2C1731299547737.1731299607387 2024-11-11T04:33:27,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741841_1017 (size=4753) 2024-11-11T04:33:32,390 INFO [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK], DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK]] 2024-11-11T04:33:32,390 WARN [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK], DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK]] 2024-11-11T04:33:32,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33113 {}] regionserver.HRegion(8855): Flush requested on 6148582bdb35bae8083a82599a576ebb 2024-11-11T04:33:32,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6148582bdb35bae8083a82599a576ebb 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T04:33:32,397 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK], DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK]] 2024-11-11T04:33:32,397 WARN [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK], DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK]] 2024-11-11T04:33:34,391 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T04:33:37,393 INFO [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK], DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK]] 2024-11-11T04:33:37,393 WARN [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK], DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK]] 2024-11-11T04:33:37,393 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:37,393 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:37,394 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:37,394 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:37,394 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:37,394 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299602373 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299607387 2024-11-11T04:33:37,395 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41903:41903),(127.0.0.1/127.0.0.1:33195:33195)] 2024-11-11T04:33:37,395 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299602373 is not closed yet, will try archiving it next time 2024-11-11T04:33:37,396 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7bef91497aa%2C33113%2C1731299547737:(num 1731299607387) roll requested 2024-11-11T04:33:37,396 INFO [regionserver/a7bef91497aa:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C33113%2C1731299547737.1731299617396 2024-11-11T04:33:37,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741842_1018 (size=1569) 2024-11-11T04:33:37,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741842_1018 (size=1569) 2024-11-11T04:33:37,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp/info/7fcf050df02d4367b08dcee752a7abb2 is 1080, key is row0015/info:/1731299589150/Put/seqid=0 2024-11-11T04:33:37,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741844_1020 (size=12509) 2024-11-11T04:33:37,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741844_1020 (size=12509) 2024-11-11T04:33:37,408 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp/info/7fcf050df02d4367b08dcee752a7abb2 2024-11-11T04:33:37,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp/info/7fcf050df02d4367b08dcee752a7abb2 as hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/7fcf050df02d4367b08dcee752a7abb2 2024-11-11T04:33:37,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/7fcf050df02d4367b08dcee752a7abb2, entries=7, sequenceid=31, filesize=12.2 K 2024-11-11T04:33:42,403 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:42,403 WARN [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:42,429 INFO [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:42,429 WARN [FSHLog-0-hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5-prefix:a7bef91497aa,33113,1731299547737 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45519,DS-267b11f2-8888-4f53-9941-c7aec908c8b1,DISK], DatanodeInfoWithStorage[127.0.0.1:43443,DS-9dfaa11f-93fb-4718-9634-4c09b6b94647,DISK]] 2024-11-11T04:33:42,429 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6148582bdb35bae8083a82599a576ebb in 10038ms, sequenceid=31, compaction requested=true 2024-11-11T04:33:42,429 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:42,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6148582bdb35bae8083a82599a576ebb: 2024-11-11T04:33:42,430 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:42,430 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-11T04:33:42,430 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:33:42,430 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:42,430 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/1917642b9d7e43a1bae2b026c993711e because midkey is the same as first or last row 2024-11-11T04:33:42,430 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:42,430 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:42,430 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299607387 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299617396 2024-11-11T04:33:42,431 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:33195:33195),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-11T04:33:42,431 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299607387 is not closed yet, will try archiving it next time 2024-11-11T04:33:42,431 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299580121 to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/oldWALs/a7bef91497aa%2C33113%2C1731299547737.1731299580121 2024-11-11T04:33:42,431 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7bef91497aa%2C33113%2C1731299547737:(num 1731299622431) roll requested 2024-11-11T04:33:42,432 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C33113%2C1731299547737.1731299622431 2024-11-11T04:33:42,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6148582bdb35bae8083a82599a576ebb:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T04:33:42,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741843_1019 (size=438) 2024-11-11T04:33:42,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741843_1019 (size=438) 2024-11-11T04:33:42,434 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299591559 to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/oldWALs/a7bef91497aa%2C33113%2C1731299547737.1731299591559 2024-11-11T04:33:42,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:33:42,435 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T04:33:42,436 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299602373 to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/oldWALs/a7bef91497aa%2C33113%2C1731299547737.1731299602373 2024-11-11T04:33:42,437 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299607387 to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/oldWALs/a7bef91497aa%2C33113%2C1731299547737.1731299607387 2024-11-11T04:33:42,438 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations 
with 1 in ratio 2024-11-11T04:33:42,439 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.HStore(1541): 6148582bdb35bae8083a82599a576ebb/info is initiating minor compaction (all files) 2024-11-11T04:33:42,440 INFO [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6148582bdb35bae8083a82599a576ebb/info in TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb. 2024-11-11T04:33:42,440 INFO [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/1917642b9d7e43a1bae2b026c993711e, hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/128a0bcf75e345da822c3f20303b7b6b, hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/7fcf050df02d4367b08dcee752a7abb2] into tmpdir=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp, totalSize=36.6 K 2024-11-11T04:33:42,441 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:42,441 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:42,442 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:42,442 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1917642b9d7e43a1bae2b026c993711e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731299560080 2024-11-11T04:33:42,442 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:42,442 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:42,442 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299617396 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299622431 2024-11-11T04:33:42,442 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] compactions.Compactor(225): Compacting 128a0bcf75e345da822c3f20303b7b6b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731299574111 2024-11-11T04:33:42,443 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7fcf050df02d4367b08dcee752a7abb2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731299589150 2024-11-11T04:33:42,444 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33195:33195),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-11T04:33:42,444 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299617396 is not closed yet, will try archiving it next time 2024-11-11T04:33:42,444 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741845_1021 (size=93) 2024-11-11T04:33:42,445 INFO [regionserver/a7bef91497aa:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C33113%2C1731299547737.1731299622444 2024-11-11T04:33:42,445 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299617396 to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/oldWALs/a7bef91497aa%2C33113%2C1731299547737.1731299617396 2024-11-11T04:33:42,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741845_1021 (size=93) 2024-11-11T04:33:42,461 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:42,461 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:42,461 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:42,461 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:42,461 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:33:42,462 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299622431 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299622444 2024-11-11T04:33:42,463 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41903:41903),(127.0.0.1/127.0.0.1:33195:33195)] 2024-11-11T04:33:42,463 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/WALs/a7bef91497aa,33113,1731299547737/a7bef91497aa%2C33113%2C1731299547737.1731299622431 is not closed yet, will try archiving it next time 2024-11-11T04:33:42,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741846_1022 (size=1258) 2024-11-11T04:33:42,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741846_1022 (size=1258) 2024-11-11T04:33:42,476 INFO [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6148582bdb35bae8083a82599a576ebb#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T04:33:42,477 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp/info/df10fc6cb8e94281b073161237f32a23 is 1080, key is row0001/info:/1731299560080/Put/seqid=0 2024-11-11T04:33:42,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741848_1024 (size=27710) 2024-11-11T04:33:42,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741848_1024 (size=27710) 2024-11-11T04:33:42,494 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp/info/df10fc6cb8e94281b073161237f32a23 as hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/df10fc6cb8e94281b073161237f32a23 2024-11-11T04:33:42,511 INFO [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6148582bdb35bae8083a82599a576ebb/info of 6148582bdb35bae8083a82599a576ebb into df10fc6cb8e94281b073161237f32a23(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T04:33:42,512 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6148582bdb35bae8083a82599a576ebb: 2024-11-11T04:33:42,514 INFO [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb., storeName=6148582bdb35bae8083a82599a576ebb/info, priority=13, startTime=1731299622431; duration=0sec 2024-11-11T04:33:42,514 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-11T04:33:42,514 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:33:42,515 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/df10fc6cb8e94281b073161237f32a23 because midkey is the same as first or last row 2024-11-11T04:33:42,515 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-11T04:33:42,515 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:33:42,515 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/df10fc6cb8e94281b073161237f32a23 because midkey is the same as first or last row 2024-11-11T04:33:42,515 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-11T04:33:42,515 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:33:42,516 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/df10fc6cb8e94281b073161237f32a23 because midkey is the same as first or last row 2024-11-11T04:33:42,516 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:33:42,516 DEBUG [RS:0;a7bef91497aa:33113-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6148582bdb35bae8083a82599a576ebb:info 2024-11-11T04:33:54,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33113 {}] regionserver.HRegion(8855): Flush requested on 6148582bdb35bae8083a82599a576ebb 2024-11-11T04:33:54,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6148582bdb35bae8083a82599a576ebb 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T04:33:54,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp/info/e787d4e125eb483c98eb08685ec5652c is 1080, key is row0022/info:/1731299622446/Put/seqid=0 2024-11-11T04:33:54,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741849_1025 (size=12509) 2024-11-11T04:33:54,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741849_1025 (size=12509) 2024-11-11T04:33:54,483 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp/info/e787d4e125eb483c98eb08685ec5652c 2024-11-11T04:33:54,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp/info/e787d4e125eb483c98eb08685ec5652c as hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/e787d4e125eb483c98eb08685ec5652c 2024-11-11T04:33:54,501 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/e787d4e125eb483c98eb08685ec5652c, entries=7, sequenceid=42, filesize=12.2 K 2024-11-11T04:33:54,502 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6148582bdb35bae8083a82599a576ebb in 34ms, sequenceid=42, compaction requested=false 2024-11-11T04:33:54,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6148582bdb35bae8083a82599a576ebb: 2024-11-11T04:33:54,503 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-11T04:33:54,503 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:33:54,503 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/df10fc6cb8e94281b073161237f32a23 because midkey is the same as first or last row 2024-11-11T04:33:56,132 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T04:34:00,375 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6148582bdb35bae8083a82599a576ebb, had cached 0 bytes from a total of 40219 2024-11-11T04:34:02,480 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T04:34:02,480 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-11T04:34:02,480 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:34:02,485 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:02,485 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:02,486 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-11T04:34:02,486 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T04:34:02,486 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1767583037, stopped=false 2024-11-11T04:34:02,486 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a7bef91497aa,46043,1731299547044 2024-11-11T04:34:02,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:34:02,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:34:02,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:02,488 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T04:34:02,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:02,488 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T04:34:02,489 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:34:02,489 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:02,489 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:34:02,489 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:34:02,489 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a7bef91497aa,33113,1731299547737' ***** 2024-11-11T04:34:02,489 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T04:34:02,489 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T04:34:02,490 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T04:34:02,490 INFO [RS:0;a7bef91497aa:33113 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T04:34:02,490 INFO [RS:0;a7bef91497aa:33113 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T04:34:02,490 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(3091): Received CLOSE for 6148582bdb35bae8083a82599a576ebb 2024-11-11T04:34:02,491 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(959): stopping server a7bef91497aa,33113,1731299547737 2024-11-11T04:34:02,491 INFO [RS:0;a7bef91497aa:33113 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T04:34:02,491 INFO [RS:0;a7bef91497aa:33113 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a7bef91497aa:33113. 
2024-11-11T04:34:02,491 DEBUG [RS:0;a7bef91497aa:33113 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:34:02,491 DEBUG [RS:0;a7bef91497aa:33113 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:02,491 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T04:34:02,491 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T04:34:02,491 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T04:34:02,491 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T04:34:02,492 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6148582bdb35bae8083a82599a576ebb, disabling compactions & flushes 2024-11-11T04:34:02,492 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb. 2024-11-11T04:34:02,492 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb. 2024-11-11T04:34:02,492 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb. after waiting 0 ms 2024-11-11T04:34:02,492 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T04:34:02,492 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb. 
2024-11-11T04:34:02,492 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:34:02,492 DEBUG [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(1325): Online Regions={6148582bdb35bae8083a82599a576ebb=TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb., 1588230740=hbase:meta,,1.1588230740} 2024-11-11T04:34:02,492 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T04:34:02,492 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T04:34:02,492 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 6148582bdb35bae8083a82599a576ebb 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-11T04:34:02,492 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:34:02,492 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:34:02,492 DEBUG [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6148582bdb35bae8083a82599a576ebb 2024-11-11T04:34:02,492 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-11T04:34:02,498 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp/info/d7f23196c7b34bc0a357ce4511b240ef is 1080, key is row0029/info:/1731299636471/Put/seqid=0 2024-11-11T04:34:02,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741850_1026 (size=8193) 2024-11-11T04:34:02,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741850_1026 (size=8193) 2024-11-11T04:34:02,505 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp/info/d7f23196c7b34bc0a357ce4511b240ef 2024-11-11T04:34:02,513 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740/.tmp/info/7babf3db3098415bb3d9c46d3decf12e is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb./info:regioninfo/1731299550400/Put/seqid=0 2024-11-11T04:34:02,514 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/.tmp/info/d7f23196c7b34bc0a357ce4511b240ef as hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/d7f23196c7b34bc0a357ce4511b240ef 2024-11-11T04:34:02,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741851_1027 (size=7016) 2024-11-11T04:34:02,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741851_1027 (size=7016) 2024-11-11T04:34:02,519 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740/.tmp/info/7babf3db3098415bb3d9c46d3decf12e 2024-11-11T04:34:02,523 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/d7f23196c7b34bc0a357ce4511b240ef, entries=3, sequenceid=48, filesize=8.0 K 2024-11-11T04:34:02,524 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 6148582bdb35bae8083a82599a576ebb in 32ms, sequenceid=48, compaction requested=true 2024-11-11T04:34:02,525 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/1917642b9d7e43a1bae2b026c993711e, hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/128a0bcf75e345da822c3f20303b7b6b, hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/7fcf050df02d4367b08dcee752a7abb2] to archive 2024-11-11T04:34:02,528 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-11T04:34:02,531 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/1917642b9d7e43a1bae2b026c993711e to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/1917642b9d7e43a1bae2b026c993711e 2024-11-11T04:34:02,533 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/128a0bcf75e345da822c3f20303b7b6b to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/128a0bcf75e345da822c3f20303b7b6b 2024-11-11T04:34:02,534 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/7fcf050df02d4367b08dcee752a7abb2 to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/info/7fcf050df02d4367b08dcee752a7abb2 2024-11-11T04:34:02,542 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740/.tmp/ns/2ce8a6280ae54122baf90e7cf15e91c6 is 43, key is default/ns:d/1731299549647/Put/seqid=0 2024-11-11T04:34:02,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741852_1028 (size=5153) 2024-11-11T04:34:02,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741852_1028 (size=5153) 2024-11-11T04:34:02,546 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=a7bef91497aa:46043 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-11T04:34:02,551 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [1917642b9d7e43a1bae2b026c993711e=12509, 128a0bcf75e345da822c3f20303b7b6b=12509, 7fcf050df02d4367b08dcee752a7abb2=12509] 2024-11-11T04:34:02,556 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/default/TestLogRolling-testSlowSyncLogRolling/6148582bdb35bae8083a82599a576ebb/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-11T04:34:02,558 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb. 2024-11-11T04:34:02,558 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6148582bdb35bae8083a82599a576ebb: Waiting for close lock at 1731299642491Running coprocessor pre-close hooks at 1731299642491Disabling compacts and flushes for region at 1731299642491Disabling writes for close at 1731299642492 (+1 ms)Obtaining lock to block concurrent updates at 1731299642492Preparing flush snapshotting stores in 6148582bdb35bae8083a82599a576ebb at 1731299642492Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731299642492Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb. at 1731299642494 (+2 ms)Flushing 6148582bdb35bae8083a82599a576ebb/info: creating writer at 1731299642494Flushing 6148582bdb35bae8083a82599a576ebb/info: appending metadata at 1731299642498 (+4 ms)Flushing 6148582bdb35bae8083a82599a576ebb/info: closing flushed file at 1731299642498Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@632041c4: reopening flushed file at 1731299642513 (+15 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 6148582bdb35bae8083a82599a576ebb in 32ms, sequenceid=48, compaction requested=true at 1731299642524 (+11 ms)Writing region close event to WAL at 1731299642552 (+28 ms)Running coprocessor post-close hooks at 1731299642556 (+4 ms)Closed at 1731299642558 (+2 ms) 2024-11-11T04:34:02,558 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731299549941.6148582bdb35bae8083a82599a576ebb. 
2024-11-11T04:34:02,692 DEBUG [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T04:34:02,864 INFO [regionserver/a7bef91497aa:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T04:34:02,864 INFO [regionserver/a7bef91497aa:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T04:34:02,865 INFO [regionserver/a7bef91497aa:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:34:02,893 DEBUG [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T04:34:02,950 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740/.tmp/ns/2ce8a6280ae54122baf90e7cf15e91c6 2024-11-11T04:34:02,974 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740/.tmp/table/2f2b1124edfb47e9bf78a6a97bde9e99 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731299550417/Put/seqid=0 2024-11-11T04:34:02,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741853_1029 (size=5396) 2024-11-11T04:34:02,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741853_1029 (size=5396) 2024-11-11T04:34:02,980 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740/.tmp/table/2f2b1124edfb47e9bf78a6a97bde9e99 2024-11-11T04:34:02,988 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740/.tmp/info/7babf3db3098415bb3d9c46d3decf12e as hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740/info/7babf3db3098415bb3d9c46d3decf12e 2024-11-11T04:34:02,995 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740/info/7babf3db3098415bb3d9c46d3decf12e, entries=10, sequenceid=11, filesize=6.9 K 2024-11-11T04:34:02,996 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740/.tmp/ns/2ce8a6280ae54122baf90e7cf15e91c6 as hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740/ns/2ce8a6280ae54122baf90e7cf15e91c6 2024-11-11T04:34:03,003 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740/ns/2ce8a6280ae54122baf90e7cf15e91c6, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T04:34:03,005 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740/.tmp/table/2f2b1124edfb47e9bf78a6a97bde9e99 as hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740/table/2f2b1124edfb47e9bf78a6a97bde9e99 2024-11-11T04:34:03,012 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740/table/2f2b1124edfb47e9bf78a6a97bde9e99, entries=2, sequenceid=11, filesize=5.3 K 2024-11-11T04:34:03,013 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 521ms, sequenceid=11, compaction requested=false 2024-11-11T04:34:03,019 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T04:34:03,020 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T04:34:03,020 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T04:34:03,020 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731299642492Running coprocessor pre-close hooks at 1731299642492Disabling compacts and flushes for region at 1731299642492Disabling writes for close at 1731299642492Obtaining lock to block concurrent updates at 1731299642492Preparing flush snapshotting stores in 1588230740 at 1731299642492Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731299642493 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731299642494 (+1 ms)Flushing 1588230740/info: creating writer at 1731299642494Flushing 1588230740/info: appending metadata at 1731299642513 (+19 ms)Flushing 1588230740/info: closing flushed file at 1731299642513Flushing 1588230740/ns: creating writer at 1731299642527 (+14 ms)Flushing 1588230740/ns: appending metadata at 1731299642542 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731299642542Flushing 1588230740/table: creating writer at 1731299642957 (+415 ms)Flushing 1588230740/table: appending metadata at 1731299642973 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731299642973Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d346b84: reopening flushed file at 1731299642987 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d8bb5c: reopening flushed file at 1731299642995 (+8 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37593c6a: reopening flushed file at 1731299643004 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 521ms, sequenceid=11, compaction requested=false at 1731299643014 (+10 ms)Writing region close event to WAL at 1731299643015 (+1 ms)Running coprocessor post-close hooks at 1731299643020 (+5 ms)Closed at 1731299643020 2024-11-11T04:34:03,020 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T04:34:03,093 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(976): stopping server a7bef91497aa,33113,1731299547737; all regions closed. 2024-11-11T04:34:03,094 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:03,094 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:03,095 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:03,095 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:03,095 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:03,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741834_1010 (size=3066) 2024-11-11T04:34:03,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741834_1010 (size=3066) 2024-11-11T04:34:03,101 DEBUG [RS:0;a7bef91497aa:33113 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/oldWALs 2024-11-11T04:34:03,101 INFO [RS:0;a7bef91497aa:33113 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7bef91497aa%2C33113%2C1731299547737.meta:.meta(num 1731299549488) 2024-11-11T04:34:03,102 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:03,102 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:03,102 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:03,102 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:03,102 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:03,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741847_1023 (size=12695) 2024-11-11T04:34:03,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741847_1023 (size=12695) 2024-11-11T04:34:03,108 DEBUG [RS:0;a7bef91497aa:33113 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/oldWALs 2024-11-11T04:34:03,108 INFO [RS:0;a7bef91497aa:33113 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7bef91497aa%2C33113%2C1731299547737:(num 1731299622444) 2024-11-11T04:34:03,108 DEBUG [RS:0;a7bef91497aa:33113 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:03,108 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:34:03,108 INFO [RS:0;a7bef91497aa:33113 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T04:34:03,109 INFO [RS:0;a7bef91497aa:33113 {}] hbase.ChoreService(370): Chore service for: regionserver/a7bef91497aa:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T04:34:03,109 INFO [RS:0;a7bef91497aa:33113 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T04:34:03,109 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T04:34:03,109 INFO [RS:0;a7bef91497aa:33113 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33113 2024-11-11T04:34:03,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a7bef91497aa,33113,1731299547737 2024-11-11T04:34:03,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:34:03,113 INFO [RS:0;a7bef91497aa:33113 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T04:34:03,114 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a7bef91497aa,33113,1731299547737] 2024-11-11T04:34:03,116 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a7bef91497aa,33113,1731299547737 already deleted, retry=false 2024-11-11T04:34:03,116 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a7bef91497aa,33113,1731299547737 expired; onlineServers=0 2024-11-11T04:34:03,116 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a7bef91497aa,46043,1731299547044' ***** 2024-11-11T04:34:03,116 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T04:34:03,116 INFO [M:0;a7bef91497aa:46043 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T04:34:03,117 INFO [M:0;a7bef91497aa:46043 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T04:34:03,117 DEBUG [M:0;a7bef91497aa:46043 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T04:34:03,117 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T04:34:03,117 DEBUG [M:0;a7bef91497aa:46043 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T04:34:03,117 DEBUG [master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299548764 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299548764,5,FailOnTimeoutGroup] 2024-11-11T04:34:03,117 DEBUG [master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299548763 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299548763,5,FailOnTimeoutGroup] 2024-11-11T04:34:03,117 INFO [M:0;a7bef91497aa:46043 {}] hbase.ChoreService(370): Chore service for: master/a7bef91497aa:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T04:34:03,117 INFO [M:0;a7bef91497aa:46043 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T04:34:03,117 DEBUG [M:0;a7bef91497aa:46043 {}] master.HMaster(1795): Stopping service threads 2024-11-11T04:34:03,117 INFO [M:0;a7bef91497aa:46043 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T04:34:03,117 INFO [M:0;a7bef91497aa:46043 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T04:34:03,118 INFO [M:0;a7bef91497aa:46043 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T04:34:03,118 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T04:34:03,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T04:34:03,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:03,119 DEBUG [M:0;a7bef91497aa:46043 {}] zookeeper.ZKUtil(347): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T04:34:03,119 WARN [M:0;a7bef91497aa:46043 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T04:34:03,119 INFO [M:0;a7bef91497aa:46043 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/.lastflushedseqids 2024-11-11T04:34:03,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741854_1030 (size=130) 2024-11-11T04:34:03,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741854_1030 (size=130) 2024-11-11T04:34:03,132 INFO [M:0;a7bef91497aa:46043 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T04:34:03,132 INFO [M:0;a7bef91497aa:46043 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T04:34:03,132 DEBUG [M:0;a7bef91497aa:46043 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T04:34:03,132 INFO [M:0;a7bef91497aa:46043 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:03,132 DEBUG [M:0;a7bef91497aa:46043 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:03,132 DEBUG [M:0;a7bef91497aa:46043 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T04:34:03,132 DEBUG [M:0;a7bef91497aa:46043 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:03,132 INFO [M:0;a7bef91497aa:46043 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-11T04:34:03,150 DEBUG [M:0;a7bef91497aa:46043 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f522104e403342ee9216a6cd19c7b3db is 82, key is hbase:meta,,1/info:regioninfo/1731299549568/Put/seqid=0 2024-11-11T04:34:03,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741855_1031 (size=5672) 2024-11-11T04:34:03,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741855_1031 (size=5672) 2024-11-11T04:34:03,157 INFO [M:0;a7bef91497aa:46043 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f522104e403342ee9216a6cd19c7b3db 2024-11-11T04:34:03,180 DEBUG [M:0;a7bef91497aa:46043 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/511f19dcdc7d4259a6702de1a3dc6bb7 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731299550425/Put/seqid=0 2024-11-11T04:34:03,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741856_1032 (size=6247) 2024-11-11T04:34:03,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741856_1032 (size=6247) 2024-11-11T04:34:03,186 INFO [M:0;a7bef91497aa:46043 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/511f19dcdc7d4259a6702de1a3dc6bb7 2024-11-11T04:34:03,193 INFO [M:0;a7bef91497aa:46043 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 511f19dcdc7d4259a6702de1a3dc6bb7 2024-11-11T04:34:03,209 DEBUG [M:0;a7bef91497aa:46043 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dfba76a5a16342e19057a996c8f38330 is 69, key is a7bef91497aa,33113,1731299547737/rs:state/1731299548777/Put/seqid=0 2024-11-11T04:34:03,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:34:03,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741857_1033 (size=5156) 2024-11-11T04:34:03,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33113-0x101959a09bc0001, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:34:03,216 INFO [RS:0;a7bef91497aa:33113 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T04:34:03,216 INFO [M:0;a7bef91497aa:46043 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dfba76a5a16342e19057a996c8f38330 2024-11-11T04:34:03,217 INFO [RS:0;a7bef91497aa:33113 {}] regionserver.HRegionServer(1031): Exiting; stopping=a7bef91497aa,33113,1731299547737; zookeeper connection closed. 2024-11-11T04:34:03,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741857_1033 (size=5156) 2024-11-11T04:34:03,217 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@22bbcdd0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@22bbcdd0 2024-11-11T04:34:03,218 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T04:34:03,239 DEBUG [M:0;a7bef91497aa:46043 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3b51ce3f62d147468a9f50cf1e6e7342 is 52, key is load_balancer_on/state:d/1731299549920/Put/seqid=0 2024-11-11T04:34:03,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741858_1034 (size=5056) 2024-11-11T04:34:03,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741858_1034 (size=5056) 2024-11-11T04:34:03,250 INFO [M:0;a7bef91497aa:46043 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3b51ce3f62d147468a9f50cf1e6e7342 2024-11-11T04:34:03,258 DEBUG [M:0;a7bef91497aa:46043 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f522104e403342ee9216a6cd19c7b3db as 
hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f522104e403342ee9216a6cd19c7b3db 2024-11-11T04:34:03,265 INFO [M:0;a7bef91497aa:46043 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f522104e403342ee9216a6cd19c7b3db, entries=8, sequenceid=59, filesize=5.5 K 2024-11-11T04:34:03,266 DEBUG [M:0;a7bef91497aa:46043 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/511f19dcdc7d4259a6702de1a3dc6bb7 as hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/511f19dcdc7d4259a6702de1a3dc6bb7 2024-11-11T04:34:03,273 INFO [M:0;a7bef91497aa:46043 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 511f19dcdc7d4259a6702de1a3dc6bb7 2024-11-11T04:34:03,273 INFO [M:0;a7bef91497aa:46043 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/511f19dcdc7d4259a6702de1a3dc6bb7, entries=6, sequenceid=59, filesize=6.1 K 2024-11-11T04:34:03,275 DEBUG [M:0;a7bef91497aa:46043 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dfba76a5a16342e19057a996c8f38330 as hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dfba76a5a16342e19057a996c8f38330 2024-11-11T04:34:03,282 INFO [M:0;a7bef91497aa:46043 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dfba76a5a16342e19057a996c8f38330, entries=1, sequenceid=59, filesize=5.0 K 2024-11-11T04:34:03,283 DEBUG [M:0;a7bef91497aa:46043 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3b51ce3f62d147468a9f50cf1e6e7342 as hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3b51ce3f62d147468a9f50cf1e6e7342 2024-11-11T04:34:03,290 INFO [M:0;a7bef91497aa:46043 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3b51ce3f62d147468a9f50cf1e6e7342, entries=1, sequenceid=59, filesize=4.9 K 2024-11-11T04:34:03,292 INFO [M:0;a7bef91497aa:46043 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 160ms, sequenceid=59, compaction requested=false 2024-11-11T04:34:03,294 INFO [M:0;a7bef91497aa:46043 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T04:34:03,294 DEBUG [M:0;a7bef91497aa:46043 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731299643132Disabling compacts and flushes for region at 1731299643132Disabling writes for close at 1731299643132Obtaining lock to block concurrent updates at 1731299643132Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731299643132Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731299643133 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731299643134 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731299643134Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731299643150 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731299643150Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731299643163 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731299643179 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731299643179Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731299643193 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731299643209 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731299643209Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731299643223 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731299643239 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731299643239Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a83d069: reopening flushed file at 1731299643257 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3bd46ec8: reopening flushed file at 1731299643265 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2011c8a4: reopening flushed file at 1731299643274 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@177febc6: reopening flushed file at 1731299643282 (+8 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 160ms, sequenceid=59, compaction requested=false at 1731299643292 (+10 ms)Writing region close event to WAL at 1731299643294 (+2 ms)Closed at 1731299643294 2024-11-11T04:34:03,295 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:03,295 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:03,295 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:03,296 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:03,296 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:03,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43443 is added to blk_1073741830_1006 (size=27973) 2024-11-11T04:34:03,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45519 is added to blk_1073741830_1006 (size=27973) 2024-11-11T04:34:03,299 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T04:34:03,299 INFO [M:0;a7bef91497aa:46043 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T04:34:03,299 INFO [M:0;a7bef91497aa:46043 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46043 2024-11-11T04:34:03,300 INFO [M:0;a7bef91497aa:46043 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T04:34:03,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:34:03,402 INFO [M:0;a7bef91497aa:46043 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T04:34:03,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46043-0x101959a09bc0000, quorum=127.0.0.1:64255, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:34:03,407 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:03,409 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:34:03,409 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:34:03,409 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:34:03,410 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/hadoop.log.dir/,STOPPED} 2024-11-11T04:34:03,412 WARN [BP-1569374830-172.17.0.2-1731299544104 heartbeating to localhost/127.0.0.1:33227 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:34:03,412 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T04:34:03,412 WARN [BP-1569374830-172.17.0.2-1731299544104 heartbeating to localhost/127.0.0.1:33227 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1569374830-172.17.0.2-1731299544104 (Datanode Uuid e5096401-9f81-4bcc-b758-17a7abe469fb) service to localhost/127.0.0.1:33227 2024-11-11T04:34:03,412 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:34:03,414 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/cluster_014cb21e-57e7-8d88-1d85-bab754db7f65/data/data3/current/BP-1569374830-172.17.0.2-1731299544104 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:03,414 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/cluster_014cb21e-57e7-8d88-1d85-bab754db7f65/data/data4/current/BP-1569374830-172.17.0.2-1731299544104 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:03,414 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:34:03,416 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:03,417 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:34:03,417 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:34:03,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:34:03,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/hadoop.log.dir/,STOPPED} 2024-11-11T04:34:03,419 WARN [BP-1569374830-172.17.0.2-1731299544104 heartbeating to localhost/127.0.0.1:33227 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:34:03,419 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T04:34:03,419 WARN [BP-1569374830-172.17.0.2-1731299544104 heartbeating to localhost/127.0.0.1:33227 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1569374830-172.17.0.2-1731299544104 (Datanode Uuid e9993b30-b5a1-43a2-b7d8-81e16e30c1f4) service to localhost/127.0.0.1:33227 2024-11-11T04:34:03,419 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:34:03,420 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/cluster_014cb21e-57e7-8d88-1d85-bab754db7f65/data/data1/current/BP-1569374830-172.17.0.2-1731299544104 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:03,420 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/cluster_014cb21e-57e7-8d88-1d85-bab754db7f65/data/data2/current/BP-1569374830-172.17.0.2-1731299544104 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:03,420 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:34:03,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T04:34:03,431 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:34:03,431 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:34:03,431 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:34:03,431 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/hadoop.log.dir/,STOPPED} 2024-11-11T04:34:03,442 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T04:34:03,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T04:34:03,482 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33227 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33227 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33227 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33227 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33227 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:33227 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: master/a7bef91497aa:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/a7bef91497aa:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/a7bef91497aa:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33227 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33227 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging 
thread: org.apache.hadoop.hdfs.PeerCache@373a7f2c java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=9 (was 4) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7212 (was 7760)
2024-11-11T04:34:03,488 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=9, ProcessCount=11, AvailableMemoryMB=7211
2024-11-11T04:34:03,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-11T04:34:03,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/hadoop.log.dir so I do NOT create it in target/test-data/5264082c-f18d-0f85-f514-d1881f058d17
2024-11-11T04:34:03,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f40903ad-f2fe-b9c6-36ae-562e56287c00/hadoop.tmp.dir so I do NOT create it in target/test-data/5264082c-f18d-0f85-f514-d1881f058d17
2024-11-11T04:34:03,489 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/cluster_a0ee29d1-e567-7146-d056-b0bb08b62ec4, deleteOnExit=true
2024-11-11T04:34:03,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-11T04:34:03,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/test.cache.data in system properties and HBase conf
2024-11-11T04:34:03,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/hadoop.tmp.dir in system properties and HBase conf
2024-11-11T04:34:03,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/hadoop.log.dir in system properties and HBase conf
2024-11-11T04:34:03,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-11T04:34:03,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-11T04:34:03,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-11T04:34:03,490 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-11T04:34:03,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-11T04:34:03,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-11T04:34:03,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-11T04:34:03,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-11T04:34:03,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-11T04:34:03,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-11T04:34:03,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-11T04:34:03,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-11T04:34:03,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-11T04:34:03,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/nfs.dump.dir in system properties and HBase conf
2024-11-11T04:34:03,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/java.io.tmpdir in system properties and HBase conf
2024-11-11T04:34:03,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-11T04:34:03,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-11T04:34:03,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-11T04:34:03,506 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-11T04:34:03,578 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T04:34:03,583 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T04:34:03,585 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T04:34:03,585 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T04:34:03,585 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-11T04:34:03,585 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T04:34:03,586 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3197ca45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/hadoop.log.dir/,AVAILABLE}
2024-11-11T04:34:03,586 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45e3157d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T04:34:03,700 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@511dc70f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/java.io.tmpdir/jetty-localhost-33057-hadoop-hdfs-3_4_1-tests_jar-_-any-311606381951355955/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-11T04:34:03,701 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e469283{HTTP/1.1, (http/1.1)}{localhost:33057}
2024-11-11T04:34:03,701 INFO [Time-limited test {}] server.Server(415): Started @101503ms
2024-11-11T04:34:03,718 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-11T04:34:03,789 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:34:03,793 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:34:03,794 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:34:03,794 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:34:03,794 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:34:03,794 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4edee9ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:34:03,795 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@276f8783{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:34:03,908 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d4bdc00{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/java.io.tmpdir/jetty-localhost-38351-hadoop-hdfs-3_4_1-tests_jar-_-any-9354345403933351230/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:03,909 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@687b21ce{HTTP/1.1, (http/1.1)}{localhost:38351} 2024-11-11T04:34:03,909 INFO [Time-limited test {}] server.Server(415): Started @101711ms 2024-11-11T04:34:03,910 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:34:03,951 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:34:03,956 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:34:03,957 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:34:03,957 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:34:03,957 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T04:34:03,958 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b44e274{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:34:03,958 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@376d199b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:34:04,001 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/cluster_a0ee29d1-e567-7146-d056-b0bb08b62ec4/data/data2/current/BP-2046443487-172.17.0.2-1731299643524/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:04,001 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/cluster_a0ee29d1-e567-7146-d056-b0bb08b62ec4/data/data1/current/BP-2046443487-172.17.0.2-1731299643524/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:04,025 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:34:04,028 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ee48f5a3acde134 with lease ID 0x9a52acedb93b9255: Processing first storage report for DS-56889a17-b804-40ef-aff0-f04fcdd16c6a from datanode DatanodeRegistration(127.0.0.1:33219, datanodeUuid=767a5fae-bde5-4727-a5e9-998f1b06de97, infoPort=38873, infoSecurePort=0, ipcPort=40017, storageInfo=lv=-57;cid=testClusterID;nsid=379024232;c=1731299643524) 2024-11-11T04:34:04,028 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ee48f5a3acde134 with lease ID 0x9a52acedb93b9255: from storage DS-56889a17-b804-40ef-aff0-f04fcdd16c6a node DatanodeRegistration(127.0.0.1:33219, datanodeUuid=767a5fae-bde5-4727-a5e9-998f1b06de97, infoPort=38873, infoSecurePort=0, ipcPort=40017, storageInfo=lv=-57;cid=testClusterID;nsid=379024232;c=1731299643524), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:34:04,028 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ee48f5a3acde134 with lease ID 0x9a52acedb93b9255: Processing first storage report for DS-c5746aad-d2ef-4fdf-b946-4a6ce6d204ac from datanode DatanodeRegistration(127.0.0.1:33219, datanodeUuid=767a5fae-bde5-4727-a5e9-998f1b06de97, infoPort=38873, infoSecurePort=0, ipcPort=40017, storageInfo=lv=-57;cid=testClusterID;nsid=379024232;c=1731299643524) 2024-11-11T04:34:04,028 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ee48f5a3acde134 with lease ID 0x9a52acedb93b9255: from storage DS-c5746aad-d2ef-4fdf-b946-4a6ce6d204ac node DatanodeRegistration(127.0.0.1:33219, datanodeUuid=767a5fae-bde5-4727-a5e9-998f1b06de97, infoPort=38873, infoSecurePort=0, ipcPort=40017, storageInfo=lv=-57;cid=testClusterID;nsid=379024232;c=1731299643524), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T04:34:04,076 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@824b6ae{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/java.io.tmpdir/jetty-localhost-33671-hadoop-hdfs-3_4_1-tests_jar-_-any-14419661255091077636/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:04,076 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d7e5c06{HTTP/1.1, (http/1.1)}{localhost:33671} 2024-11-11T04:34:04,077 INFO [Time-limited test {}] server.Server(415): Started @101879ms 2024-11-11T04:34:04,078 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
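The entries above cover the embedded HDFS coming up: the NameNode web application on localhost:33057 and two DataNode web applications (localhost:38351 and localhost:33671), each followed by a first block report to the NameNode. For reference, a minimal sketch of the same kind of two-DataNode setup using Hadoop's MiniDFSCluster test builder is shown below; the class and builder methods are the standard hadoop-hdfs test API, while the path and the printed output are purely illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniHdfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();

        // Two DataNodes, matching the two block reports in the log above.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)
            .build();
        cluster.waitActive();

        FileSystem fs = cluster.getFileSystem();
        fs.mkdirs(new Path("/user/jenkins/test-data"));   // illustrative path
        System.out.println("NameNode RPC port: " + cluster.getNameNodePort());

        cluster.shutdown();
      }
    }
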
2024-11-11T04:34:04,176 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/cluster_a0ee29d1-e567-7146-d056-b0bb08b62ec4/data/data3/current/BP-2046443487-172.17.0.2-1731299643524/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:04,176 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/cluster_a0ee29d1-e567-7146-d056-b0bb08b62ec4/data/data4/current/BP-2046443487-172.17.0.2-1731299643524/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:04,194 WARN [Thread-453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T04:34:04,196 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x79d812447dd8adbd with lease ID 0x9a52acedb93b9256: Processing first storage report for DS-e8df0f8d-39c0-40e4-b4d9-bfe0331a6796 from datanode DatanodeRegistration(127.0.0.1:39507, datanodeUuid=7dcc82e1-75bc-4495-93cd-7c3343eff527, infoPort=46867, infoSecurePort=0, ipcPort=40543, storageInfo=lv=-57;cid=testClusterID;nsid=379024232;c=1731299643524) 2024-11-11T04:34:04,196 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x79d812447dd8adbd with lease ID 0x9a52acedb93b9256: from storage DS-e8df0f8d-39c0-40e4-b4d9-bfe0331a6796 node DatanodeRegistration(127.0.0.1:39507, datanodeUuid=7dcc82e1-75bc-4495-93cd-7c3343eff527, infoPort=46867, infoSecurePort=0, ipcPort=40543, storageInfo=lv=-57;cid=testClusterID;nsid=379024232;c=1731299643524), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:34:04,196 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x79d812447dd8adbd with lease ID 0x9a52acedb93b9256: Processing first storage report for DS-e7f0f2ec-30ad-41cd-aafb-409696f61102 from datanode DatanodeRegistration(127.0.0.1:39507, datanodeUuid=7dcc82e1-75bc-4495-93cd-7c3343eff527, infoPort=46867, infoSecurePort=0, ipcPort=40543, storageInfo=lv=-57;cid=testClusterID;nsid=379024232;c=1731299643524) 2024-11-11T04:34:04,196 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x79d812447dd8adbd with lease ID 0x9a52acedb93b9256: from storage DS-e7f0f2ec-30ad-41cd-aafb-409696f61102 node DatanodeRegistration(127.0.0.1:39507, datanodeUuid=7dcc82e1-75bc-4495-93cd-7c3343eff527, infoPort=46867, infoSecurePort=0, ipcPort=40543, storageInfo=lv=-57;cid=testClusterID;nsid=379024232;c=1731299643524), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:34:04,207 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17 2024-11-11T04:34:04,209 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/cluster_a0ee29d1-e567-7146-d056-b0bb08b62ec4/zookeeper_0, clientPort=56036, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/cluster_a0ee29d1-e567-7146-d056-b0bb08b62ec4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/cluster_a0ee29d1-e567-7146-d056-b0bb08b62ec4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T04:34:04,210 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56036 2024-11-11T04:34:04,211 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:04,212 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:04,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39507 is added to blk_1073741825_1001 (size=7) 2024-11-11T04:34:04,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741825_1001 (size=7) 2024-11-11T04:34:04,227 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8 with version=8 2024-11-11T04:34:04,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/hbase-staging 2024-11-11T04:34:04,229 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a7bef91497aa:0 server-side Connection retries=45 2024-11-11T04:34:04,229 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:04,229 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:04,229 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:34:04,229 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:04,230 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:34:04,230 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T04:34:04,230 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:34:04,230 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37677 2024-11-11T04:34:04,232 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37677 connecting to ZooKeeper ensemble=127.0.0.1:56036 2024-11-11T04:34:04,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:376770x0, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:34:04,240 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37677-0x101959b88990000 connected 2024-11-11T04:34:04,255 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:04,257 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:04,260 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:34:04,260 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8, hbase.cluster.distributed=false 2024-11-11T04:34:04,262 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:34:04,262 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37677 2024-11-11T04:34:04,264 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37677 2024-11-11T04:34:04,264 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37677 2024-11-11T04:34:04,267 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37677 2024-11-11T04:34:04,267 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37677 2024-11-11T04:34:04,284 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a7bef91497aa:0 server-side Connection retries=45 2024-11-11T04:34:04,284 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:04,284 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:04,284 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:34:04,284 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:04,284 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:34:04,284 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T04:34:04,285 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:34:04,285 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38907 2024-11-11T04:34:04,287 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38907 connecting to ZooKeeper ensemble=127.0.0.1:56036 2024-11-11T04:34:04,287 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:04,290 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:04,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:389070x0, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:34:04,295 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38907-0x101959b88990001 connected 2024-11-11T04:34:04,295 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:34:04,295 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T04:34:04,297 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T04:34:04,298 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T04:34:04,299 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:34:04,302 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38907 2024-11-11T04:34:04,304 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38907 2024-11-11T04:34:04,304 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38907 2024-11-11T04:34:04,306 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38907 2024-11-11T04:34:04,307 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38907 
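Above, the master (bound to 172.17.0.2:37677) and the region server (bound to 172.17.0.2:38907) each instantiate a NettyRpcServer with a handful of call queues (handlerCount=3 per FIFO executor, 1 write / 2 read handlers on the priority queue) and join the ZooKeeper ensemble at 127.0.0.1:56036. A hedged sketch of the configuration keys that usually shape those numbers follows; the keys are standard HBase settings, but the values are illustrative and are not read from this run's site configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcTuningSketch {
      public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();

        // RPC handler threads per server (the log shows handlerCount=3 per executor).
        conf.setInt("hbase.regionserver.handler.count", 3);

        // Split the priority queue between read and write handlers
        // (the log's RWQueueRpcExecutor reports 1 write / 2 read handlers).
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
        conf.setFloat("hbase.ipc.server.callqueue.scan.ratio", 0.0f);

        // ZooKeeper ensemble used by both processes in the log.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 56036);
        return conf;
      }
    }
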
2024-11-11T04:34:04,320 DEBUG [M:0;a7bef91497aa:37677 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a7bef91497aa:37677 2024-11-11T04:34:04,324 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a7bef91497aa,37677,1731299644229 2024-11-11T04:34:04,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:34:04,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:34:04,327 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a7bef91497aa,37677,1731299644229 2024-11-11T04:34:04,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:04,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T04:34:04,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:04,329 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T04:34:04,330 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a7bef91497aa,37677,1731299644229 from backup master directory 2024-11-11T04:34:04,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:34:04,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a7bef91497aa,37677,1731299644229 2024-11-11T04:34:04,332 WARN [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
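At this point the master has registered /hbase/backup-masters/a7bef91497aa,37677,1731299644229 and is deleting it again as it promotes itself to active master; the watcher events above trace that znode dance. When debugging a run like this, the same znodes can be inspected with the plain ZooKeeper client; a minimal sketch against the mini ensemble from the log (127.0.0.1:56036, base znode /hbase) is below. Only the connect string and znode paths come from the log; the rest is illustrative.

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkInspectSketch {
      public static void main(String[] args) throws Exception {
        // Connect string taken from the log; the session timeout is arbitrary.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56036", 30_000, event -> { });
        try {
          // The mini cluster uses /hbase as its base znode.
          List<String> children = zk.getChildren("/hbase", false);
          System.out.println("/hbase children: " + children);
          System.out.println("/hbase/backup-masters: "
              + zk.getChildren("/hbase/backup-masters", false));
        } finally {
          zk.close();
        }
      }
    }
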
2024-11-11T04:34:04,332 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a7bef91497aa,37677,1731299644229 2024-11-11T04:34:04,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:34:04,342 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/hbase.id] with ID: b3762118-b5a0-4acf-b5db-e9bc1f219b5f 2024-11-11T04:34:04,342 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/.tmp/hbase.id 2024-11-11T04:34:04,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39507 is added to blk_1073741826_1002 (size=42) 2024-11-11T04:34:04,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741826_1002 (size=42) 2024-11-11T04:34:04,350 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/.tmp/hbase.id]:[hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/hbase.id] 2024-11-11T04:34:04,366 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:04,366 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T04:34:04,368 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
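The newly active master writes the cluster ID (b3762118-b5a0-4acf-b5db-e9bc1f219b5f) to a temporary file and renames it into place as hbase.id under the test root directory; the 42-byte block added just above is presumably that file. A small sketch that lists the same root directory with the ordinary Hadoop FileSystem API follows; the NameNode URI and path are copied from the log, and the listing logic is generic rather than HBase's internal FSUtils helpers.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RootDirSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode URI and root directory copied from the log.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46393"), conf);
        Path rootDir = new Path("/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8");

        // After the steps above, this listing should include hbase.id (the small
        // serialized cluster ID file) and, shortly afterwards, the MasterData directory.
        for (FileStatus status : fs.listStatus(rootDir)) {
          System.out.printf("%-70s %6d bytes%n", status.getPath(), status.getLen());
        }
      }
    }
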
2024-11-11T04:34:04,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:04,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:04,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39507 is added to blk_1073741827_1003 (size=196) 2024-11-11T04:34:04,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741827_1003 (size=196) 2024-11-11T04:34:04,385 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T04:34:04,387 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T04:34:04,387 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:34:04,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741828_1004 (size=1189) 2024-11-11T04:34:04,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39507 is added to blk_1073741828_1004 (size=1189) 2024-11-11T04:34:04,399 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store 2024-11-11T04:34:04,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39507 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:34:04,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:34:04,408 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:34:04,409 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T04:34:04,409 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:04,409 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:04,409 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T04:34:04,409 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:04,409 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
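The master-local master:store region is created with four column families (info, proc, rs, state) whose attributes are spelled out above; the info family, for example, keeps 3 versions, uses ROW_INDEX_V1 data block encoding, a ROWCOL bloom filter, an 8 KB block size and in-memory caching. A sketch of how an equivalent descriptor is assembled with the public client API is shown below; it reproduces only the info family from the log, and the table name is illustrative since the real descriptor is built internally by MasterRegion.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static TableDescriptor infoOnlyDescriptor() {
        // Mirrors the 'info' family attributes reported in the log.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .setInMemory(true)
            .build();

        // Illustrative table name; the real region belongs to the master-local 'master:store' table.
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("sketch", "store"))
            .setColumnFamily(info)
            .build();
      }
    }
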
2024-11-11T04:34:04,409 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731299644409Disabling compacts and flushes for region at 1731299644409Disabling writes for close at 1731299644409Writing region close event to WAL at 1731299644409Closed at 1731299644409 2024-11-11T04:34:04,410 WARN [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/.initializing 2024-11-11T04:34:04,410 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/WALs/a7bef91497aa,37677,1731299644229 2024-11-11T04:34:04,414 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C37677%2C1731299644229, suffix=, logDir=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/WALs/a7bef91497aa,37677,1731299644229, archiveDir=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/oldWALs, maxLogs=10 2024-11-11T04:34:04,414 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C37677%2C1731299644229.1731299644414 2024-11-11T04:34:04,420 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/WALs/a7bef91497aa,37677,1731299644229/a7bef91497aa%2C37677%2C1731299644229.1731299644414 2024-11-11T04:34:04,423 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38873:38873),(127.0.0.1/127.0.0.1:46867:46867)] 2024-11-11T04:34:04,427 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:34:04,428 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:34:04,428 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:04,428 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:04,429 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:04,431 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T04:34:04,431 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:04,432 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:04,432 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:04,434 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T04:34:04,434 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:04,434 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:34:04,435 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:04,437 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T04:34:04,437 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:04,437 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:34:04,438 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:04,439 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T04:34:04,439 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:04,439 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:34:04,440 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:04,440 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:04,441 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:04,443 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:04,443 DEBUG [master/a7bef91497aa:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:04,443 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T04:34:04,445 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:04,447 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:34:04,448 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=767244, jitterRate=-0.024399608373641968}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T04:34:04,449 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731299644428Initializing all the Stores at 1731299644429 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299644429Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299644429Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299644429Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299644429Cleaning up temporary data from old regions at 1731299644443 (+14 ms)Region opened successfully at 1731299644449 (+6 ms) 2024-11-11T04:34:04,449 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T04:34:04,453 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19d15abf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7bef91497aa/172.17.0.2:0 2024-11-11T04:34:04,455 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T04:34:04,455 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T04:34:04,455 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T04:34:04,455 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T04:34:04,455 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T04:34:04,456 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T04:34:04,456 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T04:34:04,458 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T04:34:04,459 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T04:34:04,461 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T04:34:04,461 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T04:34:04,462 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T04:34:04,463 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T04:34:04,464 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T04:34:04,464 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T04:34:04,466 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T04:34:04,467 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T04:34:04,468 DEBUG 
[master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T04:34:04,471 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T04:34:04,472 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T04:34:04,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:34:04,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:34:04,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:04,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:04,474 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a7bef91497aa,37677,1731299644229, sessionid=0x101959b88990000, setting cluster-up flag (Was=false) 2024-11-11T04:34:04,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:04,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:04,483 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T04:34:04,484 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7bef91497aa,37677,1731299644229 2024-11-11T04:34:04,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:04,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:04,493 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T04:34:04,494 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7bef91497aa,37677,1731299644229 2024-11-11T04:34:04,496 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T04:34:04,497 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T04:34:04,498 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T04:34:04,498 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T04:34:04,498 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a7bef91497aa,37677,1731299644229 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T04:34:04,499 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:34:04,499 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:34:04,500 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:34:04,500 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:34:04,500 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a7bef91497aa:0, corePoolSize=10, maxPoolSize=10 2024-11-11T04:34:04,500 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:04,500 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a7bef91497aa:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:34:04,500 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a7bef91497aa:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T04:34:04,501 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731299674501 2024-11-11T04:34:04,501 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T04:34:04,501 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T04:34:04,501 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T04:34:04,502 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T04:34:04,502 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T04:34:04,502 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:34:04,502 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T04:34:04,502 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T04:34:04,502 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:04,502 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T04:34:04,502 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T04:34:04,502 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T04:34:04,503 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T04:34:04,503 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T04:34:04,503 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:04,503 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299644503,5,FailOnTimeoutGroup] 2024-11-11T04:34:04,503 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299644503,5,FailOnTimeoutGroup] 2024-11-11T04:34:04,503 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-11T04:34:04,503 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T04:34:04,503 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T04:34:04,504 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:04,504 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-11T04:34:04,508 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.HRegionServer(746): ClusterId : b3762118-b5a0-4acf-b5db-e9bc1f219b5f 2024-11-11T04:34:04,509 DEBUG [RS:0;a7bef91497aa:38907 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T04:34:04,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741831_1007 (size=1321) 2024-11-11T04:34:04,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39507 is added to blk_1073741831_1007 (size=1321) 2024-11-11T04:34:04,511 DEBUG [RS:0;a7bef91497aa:38907 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T04:34:04,511 DEBUG [RS:0;a7bef91497aa:38907 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T04:34:04,512 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T04:34:04,512 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8 2024-11-11T04:34:04,514 DEBUG [RS:0;a7bef91497aa:38907 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T04:34:04,514 DEBUG [RS:0;a7bef91497aa:38907 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71a4439a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7bef91497aa/172.17.0.2:0 2024-11-11T04:34:04,523 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:34:04,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39507 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:34:04,524 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:34:04,528 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:34:04,529 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:34:04,529 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:04,530 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:04,530 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T04:34:04,531 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T04:34:04,531 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:04,532 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:04,532 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:34:04,534 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:34:04,534 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:04,534 DEBUG [RS:0;a7bef91497aa:38907 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a7bef91497aa:38907 2024-11-11T04:34:04,534 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T04:34:04,534 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T04:34:04,534 DEBUG [RS:0;a7bef91497aa:38907 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-11T04:34:04,534 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:04,534 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T04:34:04,535 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.HRegionServer(2659): reportForDuty to master=a7bef91497aa,37677,1731299644229 with port=38907, startcode=1731299644284 2024-11-11T04:34:04,535 DEBUG [RS:0;a7bef91497aa:38907 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T04:34:04,536 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:34:04,536 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:04,536 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:04,536 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T04:34:04,537 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/data/hbase/meta/1588230740 2024-11-11T04:34:04,538 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/data/hbase/meta/1588230740 2024-11-11T04:34:04,538 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37477, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T04:34:04,539 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37677 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a7bef91497aa,38907,1731299644284 2024-11-11T04:34:04,539 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37677 {}] master.ServerManager(517): Registering regionserver=a7bef91497aa,38907,1731299644284 2024-11-11T04:34:04,540 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T04:34:04,540 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 
1588230740 2024-11-11T04:34:04,541 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T04:34:04,541 DEBUG [RS:0;a7bef91497aa:38907 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8 2024-11-11T04:34:04,541 DEBUG [RS:0;a7bef91497aa:38907 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46393 2024-11-11T04:34:04,542 DEBUG [RS:0;a7bef91497aa:38907 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T04:34:04,542 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T04:34:04,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:34:04,545 DEBUG [RS:0;a7bef91497aa:38907 {}] zookeeper.ZKUtil(111): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a7bef91497aa,38907,1731299644284 2024-11-11T04:34:04,545 WARN [RS:0;a7bef91497aa:38907 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T04:34:04,545 INFO [RS:0;a7bef91497aa:38907 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:34:04,545 DEBUG [RS:0;a7bef91497aa:38907 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/WALs/a7bef91497aa,38907,1731299644284 2024-11-11T04:34:04,546 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:34:04,547 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a7bef91497aa,38907,1731299644284] 2024-11-11T04:34:04,547 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=838547, jitterRate=0.06626825034618378}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T04:34:04,548 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731299644525Initializing all the Stores at 1731299644527 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299644527Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299644527Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299644527Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299644527Cleaning up temporary data from old regions at 1731299644540 (+13 ms)Region opened successfully at 1731299644548 (+8 ms) 2024-11-11T04:34:04,549 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:34:04,549 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T04:34:04,549 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T04:34:04,549 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:34:04,549 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:34:04,550 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T04:34:04,552 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T04:34:04,552 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731299644549Disabling compacts and flushes for region at 1731299644549Disabling writes for close at 1731299644549Writing region close event to WAL at 1731299644552 (+3 ms)Closed at 1731299644552 2024-11-11T04:34:04,553 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T04:34:04,553 INFO [RS:0;a7bef91497aa:38907 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T04:34:04,553 INFO [RS:0;a7bef91497aa:38907 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-11T04:34:04,554 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T04:34:04,554 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:34:04,554 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T04:34:04,554 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T04:34:04,556 INFO [RS:0;a7bef91497aa:38907 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T04:34:04,556 INFO [RS:0;a7bef91497aa:38907 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:04,556 DEBUG [RS:0;a7bef91497aa:38907 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:04,556 DEBUG [RS:0;a7bef91497aa:38907 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:04,556 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T04:34:04,556 DEBUG [RS:0;a7bef91497aa:38907 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:04,556 DEBUG [RS:0;a7bef91497aa:38907 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:04,556 DEBUG [RS:0;a7bef91497aa:38907 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:04,557 DEBUG [RS:0;a7bef91497aa:38907 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a7bef91497aa:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:34:04,557 DEBUG [RS:0;a7bef91497aa:38907 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:04,557 DEBUG [RS:0;a7bef91497aa:38907 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:04,557 DEBUG [RS:0;a7bef91497aa:38907 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:04,557 DEBUG [RS:0;a7bef91497aa:38907 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:04,557 DEBUG [RS:0;a7bef91497aa:38907 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:04,557 DEBUG [RS:0;a7bef91497aa:38907 {}] 
executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:04,557 DEBUG [RS:0;a7bef91497aa:38907 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a7bef91497aa:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:34:04,557 DEBUG [RS:0;a7bef91497aa:38907 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:34:04,558 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T04:34:04,560 INFO [RS:0;a7bef91497aa:38907 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:04,560 INFO [RS:0;a7bef91497aa:38907 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:04,560 INFO [RS:0;a7bef91497aa:38907 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:04,560 INFO [RS:0;a7bef91497aa:38907 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:04,560 INFO [RS:0;a7bef91497aa:38907 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:04,560 INFO [RS:0;a7bef91497aa:38907 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,38907,1731299644284-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:34:04,580 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T04:34:04,580 INFO [RS:0;a7bef91497aa:38907 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,38907,1731299644284-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:04,581 INFO [RS:0;a7bef91497aa:38907 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:04,581 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.Replication(171): a7bef91497aa,38907,1731299644284 started 2024-11-11T04:34:04,596 INFO [RS:0;a7bef91497aa:38907 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T04:34:04,596 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.HRegionServer(1482): Serving as a7bef91497aa,38907,1731299644284, RpcServer on a7bef91497aa/172.17.0.2:38907, sessionid=0x101959b88990001 2024-11-11T04:34:04,596 DEBUG [RS:0;a7bef91497aa:38907 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T04:34:04,596 DEBUG [RS:0;a7bef91497aa:38907 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a7bef91497aa,38907,1731299644284 2024-11-11T04:34:04,596 DEBUG [RS:0;a7bef91497aa:38907 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7bef91497aa,38907,1731299644284' 2024-11-11T04:34:04,596 DEBUG [RS:0;a7bef91497aa:38907 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T04:34:04,597 DEBUG [RS:0;a7bef91497aa:38907 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T04:34:04,598 DEBUG [RS:0;a7bef91497aa:38907 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T04:34:04,598 DEBUG [RS:0;a7bef91497aa:38907 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T04:34:04,598 DEBUG [RS:0;a7bef91497aa:38907 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a7bef91497aa,38907,1731299644284 2024-11-11T04:34:04,598 DEBUG [RS:0;a7bef91497aa:38907 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7bef91497aa,38907,1731299644284' 2024-11-11T04:34:04,598 DEBUG [RS:0;a7bef91497aa:38907 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T04:34:04,598 DEBUG [RS:0;a7bef91497aa:38907 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T04:34:04,599 DEBUG [RS:0;a7bef91497aa:38907 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T04:34:04,599 INFO [RS:0;a7bef91497aa:38907 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T04:34:04,599 INFO [RS:0;a7bef91497aa:38907 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T04:34:04,702 INFO [RS:0;a7bef91497aa:38907 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C38907%2C1731299644284, suffix=, logDir=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/WALs/a7bef91497aa,38907,1731299644284, archiveDir=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/oldWALs, maxLogs=32 2024-11-11T04:34:04,704 INFO [RS:0;a7bef91497aa:38907 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C38907%2C1731299644284.1731299644703 2024-11-11T04:34:04,708 WARN [a7bef91497aa:37677 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-11T04:34:04,711 INFO [RS:0;a7bef91497aa:38907 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/WALs/a7bef91497aa,38907,1731299644284/a7bef91497aa%2C38907%2C1731299644284.1731299644703 2024-11-11T04:34:04,712 DEBUG [RS:0;a7bef91497aa:38907 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46867:46867),(127.0.0.1/127.0.0.1:38873:38873)] 2024-11-11T04:34:04,959 DEBUG [a7bef91497aa:37677 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T04:34:04,959 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a7bef91497aa,38907,1731299644284 2024-11-11T04:34:04,961 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7bef91497aa,38907,1731299644284, state=OPENING 2024-11-11T04:34:04,963 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T04:34:04,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:04,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:04,965 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T04:34:04,965 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:34:04,965 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:34:04,965 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a7bef91497aa,38907,1731299644284}] 2024-11-11T04:34:05,119 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T04:34:05,121 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47871, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T04:34:05,126 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T04:34:05,126 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:34:05,128 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C38907%2C1731299644284.meta, suffix=.meta, 
logDir=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/WALs/a7bef91497aa,38907,1731299644284, archiveDir=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/oldWALs, maxLogs=32 2024-11-11T04:34:05,130 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C38907%2C1731299644284.meta.1731299645130.meta 2024-11-11T04:34:05,135 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/WALs/a7bef91497aa,38907,1731299644284/a7bef91497aa%2C38907%2C1731299644284.meta.1731299645130.meta 2024-11-11T04:34:05,138 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46867:46867),(127.0.0.1/127.0.0.1:38873:38873)] 2024-11-11T04:34:05,139 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:34:05,139 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T04:34:05,139 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T04:34:05,139 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-11T04:34:05,140 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T04:34:05,140 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:34:05,140 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T04:34:05,140 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T04:34:05,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:34:05,142 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:34:05,143 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:05,143 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:05,143 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T04:34:05,144 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T04:34:05,145 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:05,145 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:05,145 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:34:05,146 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:34:05,146 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:05,147 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:05,147 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T04:34:05,148 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:34:05,148 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:05,148 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-11T04:34:05,148 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T04:34:05,149 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/data/hbase/meta/1588230740 2024-11-11T04:34:05,151 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/data/hbase/meta/1588230740 2024-11-11T04:34:05,153 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T04:34:05,153 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T04:34:05,153 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T04:34:05,155 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T04:34:05,156 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=806645, jitterRate=0.025702863931655884}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T04:34:05,156 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T04:34:05,157 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731299645140Writing region info on filesystem at 1731299645140Initializing all the Stores at 1731299645141 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299645141Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299645141Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299645141Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299645141Cleaning up temporary data from old regions at 1731299645153 (+12 ms)Running coprocessor post-open hooks at 1731299645156 (+3 ms)Region opened successfully at 1731299645157 (+1 ms) 2024-11-11T04:34:05,159 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731299645119 2024-11-11T04:34:05,162 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T04:34:05,162 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T04:34:05,163 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a7bef91497aa,38907,1731299644284 2024-11-11T04:34:05,165 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7bef91497aa,38907,1731299644284, state=OPEN 2024-11-11T04:34:05,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:34:05,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:34:05,170 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a7bef91497aa,38907,1731299644284 2024-11-11T04:34:05,170 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:34:05,170 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:34:05,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T04:34:05,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a7bef91497aa,38907,1731299644284 in 205 msec 2024-11-11T04:34:05,177 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T04:34:05,177 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 620 msec 2024-11-11T04:34:05,178 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:34:05,178 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T04:34:05,179 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T04:34:05,179 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7bef91497aa,38907,1731299644284, seqNum=-1] 2024-11-11T04:34:05,180 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:34:05,181 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54275, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:34:05,188 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 690 msec 2024-11-11T04:34:05,188 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731299645188, completionTime=-1 2024-11-11T04:34:05,188 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T04:34:05,188 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T04:34:05,190 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T04:34:05,190 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731299705190 2024-11-11T04:34:05,190 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731299765190 2024-11-11T04:34:05,190 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-11T04:34:05,191 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,37677,1731299644229-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:05,191 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,37677,1731299644229-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:05,191 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,37677,1731299644229-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:05,191 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a7bef91497aa:37677, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T04:34:05,191 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:05,191 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:05,193 DEBUG [master/a7bef91497aa:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T04:34:05,195 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.863sec 2024-11-11T04:34:05,195 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T04:34:05,195 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T04:34:05,195 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T04:34:05,195 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T04:34:05,195 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T04:34:05,196 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,37677,1731299644229-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:34:05,196 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,37677,1731299644229-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T04:34:05,198 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T04:34:05,198 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T04:34:05,198 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,37677,1731299644229-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
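
The ChoreService entries above show the master scheduling its periodic maintenance chores (HbckChore, FlushedSequenceIdFlusher, MobFileCleanerChore, and so on). Purely as an illustration of that mechanism, and not taken from this log, the sketch below registers a trivial ScheduledChore; it assumes the ScheduledChore(name, stopper, period) constructor and the single-argument ChoreService constructor, and treats the period as the default unit (milliseconds).

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable; real servers pass themselves so chores stop on shutdown.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");
    // Runs chore() roughly every 1000 (assumed milliseconds) until stopped.
    service.scheduleChore(new ScheduledChore("DemoChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("DemoChore tick");
      }
    });
    Thread.sleep(3500);
    stopper.stop("demo finished");
    service.shutdown();
  }
}
```
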
2024-11-11T04:34:05,209 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ffb657f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:34:05,209 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a7bef91497aa,37677,-1 for getting cluster id 2024-11-11T04:34:05,209 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T04:34:05,211 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b3762118-b5a0-4acf-b5db-e9bc1f219b5f' 2024-11-11T04:34:05,211 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T04:34:05,211 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b3762118-b5a0-4acf-b5db-e9bc1f219b5f" 2024-11-11T04:34:05,212 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@564619b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:34:05,212 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a7bef91497aa,37677,-1] 2024-11-11T04:34:05,212 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T04:34:05,212 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:05,214 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42416, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T04:34:05,215 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e5d9cb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:34:05,216 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T04:34:05,217 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7bef91497aa,38907,1731299644284, seqNum=-1] 2024-11-11T04:34:05,217 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:34:05,220 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36366, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:34:05,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a7bef91497aa,37677,1731299644229 2024-11-11T04:34:05,227 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:05,230 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T04:34:05,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T04:34:05,231 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T04:34:05,231 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:34:05,231 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:05,231 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:05,231 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T04:34:05,231 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T04:34:05,231 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=398545913, stopped=false 2024-11-11T04:34:05,231 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a7bef91497aa,37677,1731299644229 2024-11-11T04:34:05,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:34:05,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:34:05,233 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T04:34:05,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:05,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:05,234 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
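
The call stack above originates in AbstractTestLogRolling.tearDown, which shuts the mini cluster down through HBaseTestingUtil. For orientation only, a JUnit 4 test wired to that utility typically follows the start/shutdown pattern sketched below; this is not the actual test code, the table and family names are invented, and the helper methods are assumed to match the HBaseTestingUtility API that HBaseTestingUtil replaces.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterSketchTest {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts ZK, a mini DFS, one master and one region server in-process.
    util.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Mirrors the shutdownMiniCluster() call visible in the stack trace above.
    util.shutdownMiniCluster();
  }

  @Test
  public void writesAndReadsOneRow() throws Exception {
    // Hypothetical table/family names, used only for this sketch.
    try (Table table = util.createTable(TableName.valueOf("sketch"), "cf")) {
      // A real test would issue Puts/Gets here before tearDown runs.
    }
  }
}
```
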
2024-11-11T04:34:05,234 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:34:05,234 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:34:05,234 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:34:05,234 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:05,234 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a7bef91497aa,38907,1731299644284' ***** 2024-11-11T04:34:05,234 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T04:34:05,235 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T04:34:05,235 INFO [RS:0;a7bef91497aa:38907 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T04:34:05,235 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T04:34:05,235 INFO [RS:0;a7bef91497aa:38907 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T04:34:05,235 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.HRegionServer(959): stopping server a7bef91497aa,38907,1731299644284 2024-11-11T04:34:05,235 INFO [RS:0;a7bef91497aa:38907 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T04:34:05,235 INFO [RS:0;a7bef91497aa:38907 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a7bef91497aa:38907. 2024-11-11T04:34:05,235 DEBUG [RS:0;a7bef91497aa:38907 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:34:05,235 DEBUG [RS:0;a7bef91497aa:38907 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:05,236 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
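
The ***** STOPPING region server ***** banner above is the normal path when the whole cluster shuts down, but tests in this suite sometimes stop a single region server deliberately (for example before killing a datanode). The sketch below is a loose illustration of that, under the assumption that SingleProcessHBaseCluster (named in the stack traces above) still exposes the stopRegionServer/waitOnRegionServer helpers of its MiniHBaseCluster predecessor; treat those method names as unverified.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;

public class StopOneRegionServer {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();
    try {
      // Assumed helpers: stopRegionServer(int) asks RS #0 to stop,
      // waitOnRegionServer(int) blocks until its thread has exited.
      SingleProcessHBaseCluster cluster = util.getMiniHBaseCluster();
      cluster.stopRegionServer(0);
      cluster.waitOnRegionServer(0);
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```
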
2024-11-11T04:34:05,236 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T04:34:05,236 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T04:34:05,236 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T04:34:05,236 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-11T04:34:05,236 DEBUG [RS:0;a7bef91497aa:38907 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-11T04:34:05,236 DEBUG [RS:0;a7bef91497aa:38907 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T04:34:05,236 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:34:05,236 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T04:34:05,236 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T04:34:05,236 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:34:05,236 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:34:05,237 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-11T04:34:05,256 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/data/hbase/meta/1588230740/.tmp/ns/fa86e1f29fe44ff3a3e7ee8d28028ddd is 43, key is default/ns:d/1731299645182/Put/seqid=0 2024-11-11T04:34:05,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39507 is added to blk_1073741835_1011 (size=5153) 2024-11-11T04:34:05,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741835_1011 (size=5153) 2024-11-11T04:34:05,263 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/data/hbase/meta/1588230740/.tmp/ns/fa86e1f29fe44ff3a3e7ee8d28028ddd 2024-11-11T04:34:05,272 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/data/hbase/meta/1588230740/.tmp/ns/fa86e1f29fe44ff3a3e7ee8d28028ddd as hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/data/hbase/meta/1588230740/ns/fa86e1f29fe44ff3a3e7ee8d28028ddd 2024-11-11T04:34:05,278 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/data/hbase/meta/1588230740/ns/fa86e1f29fe44ff3a3e7ee8d28028ddd, entries=2, sequenceid=6, filesize=5.0 K 2024-11-11T04:34:05,280 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false 2024-11-11T04:34:05,280 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T04:34:05,285 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-11T04:34:05,286 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T04:34:05,286 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T04:34:05,286 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731299645236Running coprocessor pre-close hooks at 1731299645236Disabling compacts and flushes for region at 1731299645236Disabling writes for close at 1731299645236Obtaining lock to block concurrent updates at 1731299645237 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731299645237Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731299645237Flushing stores of hbase:meta,,1.1588230740 at 1731299645238 (+1 ms)Flushing 1588230740/ns: creating writer at 1731299645238Flushing 1588230740/ns: appending metadata at 1731299645255 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731299645255Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79cfb4fd: reopening flushed file at 1731299645271 (+16 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false at 1731299645280 (+9 ms)Writing region close event to WAL at 1731299645282 (+2 ms)Running coprocessor post-close hooks at 1731299645286 (+4 ms)Closed at 1731299645286 2024-11-11T04:34:05,286 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T04:34:05,436 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.HRegionServer(976): stopping server a7bef91497aa,38907,1731299644284; all regions closed. 
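
The flush recorded above happens internally while hbase:meta is being closed: the memstore is written to an HFile under .tmp/ns and then committed into the ns family directory. As a hedged illustration of the client-facing equivalent (not something this log performed), Admin.flush can force the same memstore-to-HFile flush on demand:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushMeta {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Forces the memstores of hbase:meta out to HFiles, the same operation
      // the close path performs before dropping the region.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}
```
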
2024-11-11T04:34:05,437 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:05,437 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:05,437 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:05,437 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:05,438 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:05,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741834_1010 (size=1152) 2024-11-11T04:34:05,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39507 is added to blk_1073741834_1010 (size=1152) 2024-11-11T04:34:05,443 DEBUG [RS:0;a7bef91497aa:38907 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/oldWALs 2024-11-11T04:34:05,443 INFO [RS:0;a7bef91497aa:38907 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7bef91497aa%2C38907%2C1731299644284.meta:.meta(num 1731299645130) 2024-11-11T04:34:05,443 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:05,443 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:05,443 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:05,443 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:05,444 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:05,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741833_1009 (size=93) 2024-11-11T04:34:05,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39507 is added to blk_1073741833_1009 (size=93) 2024-11-11T04:34:05,448 DEBUG [RS:0;a7bef91497aa:38907 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/oldWALs 2024-11-11T04:34:05,448 INFO [RS:0;a7bef91497aa:38907 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7bef91497aa%2C38907%2C1731299644284:(num 1731299644703) 2024-11-11T04:34:05,448 DEBUG [RS:0;a7bef91497aa:38907 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:05,448 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:34:05,448 INFO [RS:0;a7bef91497aa:38907 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T04:34:05,449 INFO [RS:0;a7bef91497aa:38907 {}] hbase.ChoreService(370): Chore service for: regionserver/a7bef91497aa:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T04:34:05,449 INFO [RS:0;a7bef91497aa:38907 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T04:34:05,449 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
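
The AbstractFSWAL entries above show the region server closing its meta WAL and its default WAL and archiving both files to oldWALs. Since the surrounding test is TestLogRolling, it may help to note that a WAL roll can also be requested explicitly through the Admin API; the following is a sketch, not a copy of the test's code, and assumes ClusterMetrics-based discovery of the live servers is acceptable.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWals {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask every live region server to close its current WAL and open a new one;
      // the old file is eventually archived to oldWALs, as seen in the log above.
      for (ServerName rs : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
        admin.rollWALWriter(rs);
      }
    }
  }
}
```
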
2024-11-11T04:34:05,449 INFO [RS:0;a7bef91497aa:38907 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38907 2024-11-11T04:34:05,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:34:05,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a7bef91497aa,38907,1731299644284 2024-11-11T04:34:05,451 INFO [RS:0;a7bef91497aa:38907 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T04:34:05,453 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a7bef91497aa,38907,1731299644284] 2024-11-11T04:34:05,455 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a7bef91497aa,38907,1731299644284 already deleted, retry=false 2024-11-11T04:34:05,455 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a7bef91497aa,38907,1731299644284 expired; onlineServers=0 2024-11-11T04:34:05,455 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a7bef91497aa,37677,1731299644229' ***** 2024-11-11T04:34:05,455 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T04:34:05,455 INFO [M:0;a7bef91497aa:37677 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T04:34:05,455 INFO [M:0;a7bef91497aa:37677 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T04:34:05,455 DEBUG [M:0;a7bef91497aa:37677 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T04:34:05,455 DEBUG [M:0;a7bef91497aa:37677 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T04:34:05,455 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
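
The RegionServerTracker entry above reacts to the deletion of the region server's ephemeral znode under /hbase/rs, which is how the master notices the server is gone. The same pattern can be reproduced with the plain ZooKeeper client; the sketch below watches a znode for deletion. The connect string and path are placeholders, and this is illustrative rather than HBase's own ZKWatcher code.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class WatchEphemeralNode {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    // Placeholder connect string; the test's quorum was an in-process server.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("znode deleted: " + event.getPath());
        deleted.countDown();
      }
    });
    // exists(path, true) registers a one-shot watch that fires on create/delete/data change.
    zk.exists("/hbase/rs/example-server", true);
    deleted.await(); // in HBase this is roughly where expiration handling would begin
    zk.close();
  }
}
```
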
2024-11-11T04:34:05,455 DEBUG [master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299644503 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299644503,5,FailOnTimeoutGroup] 2024-11-11T04:34:05,455 DEBUG [master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299644503 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299644503,5,FailOnTimeoutGroup] 2024-11-11T04:34:05,455 INFO [M:0;a7bef91497aa:37677 {}] hbase.ChoreService(370): Chore service for: master/a7bef91497aa:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T04:34:05,455 INFO [M:0;a7bef91497aa:37677 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T04:34:05,455 DEBUG [M:0;a7bef91497aa:37677 {}] master.HMaster(1795): Stopping service threads 2024-11-11T04:34:05,456 INFO [M:0;a7bef91497aa:37677 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T04:34:05,456 INFO [M:0;a7bef91497aa:37677 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T04:34:05,456 INFO [M:0;a7bef91497aa:37677 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T04:34:05,456 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T04:34:05,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T04:34:05,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:05,457 DEBUG [M:0;a7bef91497aa:37677 {}] zookeeper.ZKUtil(347): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T04:34:05,457 WARN [M:0;a7bef91497aa:37677 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T04:34:05,458 INFO [M:0;a7bef91497aa:37677 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/.lastflushedseqids 2024-11-11T04:34:05,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741836_1012 (size=99) 2024-11-11T04:34:05,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39507 is added to blk_1073741836_1012 (size=99) 2024-11-11T04:34:05,464 INFO [M:0;a7bef91497aa:37677 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T04:34:05,464 INFO [M:0;a7bef91497aa:37677 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T04:34:05,465 DEBUG [M:0;a7bef91497aa:37677 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T04:34:05,465 INFO [M:0;a7bef91497aa:37677 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:05,465 DEBUG [M:0;a7bef91497aa:37677 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:05,465 DEBUG [M:0;a7bef91497aa:37677 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T04:34:05,465 DEBUG [M:0;a7bef91497aa:37677 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:05,465 INFO [M:0;a7bef91497aa:37677 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-11T04:34:05,483 DEBUG [M:0;a7bef91497aa:37677 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/59a4ef702d90478282a2b1e70573e456 is 82, key is hbase:meta,,1/info:regioninfo/1731299645163/Put/seqid=0 2024-11-11T04:34:05,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39507 is added to blk_1073741837_1013 (size=5672) 2024-11-11T04:34:05,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741837_1013 (size=5672) 2024-11-11T04:34:05,488 INFO [M:0;a7bef91497aa:37677 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/59a4ef702d90478282a2b1e70573e456 2024-11-11T04:34:05,509 DEBUG [M:0;a7bef91497aa:37677 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aed28183e44e4f1f9729ecd5a7c7aecc is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731299645187/Put/seqid=0 2024-11-11T04:34:05,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741838_1014 (size=5275) 2024-11-11T04:34:05,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39507 is added to blk_1073741838_1014 (size=5275) 2024-11-11T04:34:05,515 INFO [M:0;a7bef91497aa:37677 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aed28183e44e4f1f9729ecd5a7c7aecc 2024-11-11T04:34:05,537 DEBUG [M:0;a7bef91497aa:37677 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/767bb16bf7a14707877d3279285a6439 is 69, key is a7bef91497aa,38907,1731299644284/rs:state/1731299644539/Put/seqid=0 2024-11-11T04:34:05,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39507 is added to blk_1073741839_1015 (size=5156) 2024-11-11T04:34:05,542 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741839_1015 (size=5156) 2024-11-11T04:34:05,543 INFO [M:0;a7bef91497aa:37677 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/767bb16bf7a14707877d3279285a6439 2024-11-11T04:34:05,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:34:05,553 INFO [RS:0;a7bef91497aa:38907 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T04:34:05,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38907-0x101959b88990001, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:34:05,553 INFO [RS:0;a7bef91497aa:38907 {}] regionserver.HRegionServer(1031): Exiting; stopping=a7bef91497aa,38907,1731299644284; zookeeper connection closed. 2024-11-11T04:34:05,553 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@cb06e41 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@cb06e41 2024-11-11T04:34:05,553 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T04:34:05,563 DEBUG [M:0;a7bef91497aa:37677 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c72f64baeda34b82960566a8a06a6200 is 52, key is load_balancer_on/state:d/1731299645229/Put/seqid=0 2024-11-11T04:34:05,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39507 is added to blk_1073741840_1016 (size=5056) 2024-11-11T04:34:05,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741840_1016 (size=5056) 2024-11-11T04:34:05,569 INFO [M:0;a7bef91497aa:37677 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c72f64baeda34b82960566a8a06a6200 2024-11-11T04:34:05,575 DEBUG [M:0;a7bef91497aa:37677 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/59a4ef702d90478282a2b1e70573e456 as hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/59a4ef702d90478282a2b1e70573e456 2024-11-11T04:34:05,581 INFO [M:0;a7bef91497aa:37677 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/59a4ef702d90478282a2b1e70573e456, entries=8, sequenceid=29, filesize=5.5 K 2024-11-11T04:34:05,582 DEBUG [M:0;a7bef91497aa:37677 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aed28183e44e4f1f9729ecd5a7c7aecc as hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/aed28183e44e4f1f9729ecd5a7c7aecc 2024-11-11T04:34:05,588 INFO [M:0;a7bef91497aa:37677 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/aed28183e44e4f1f9729ecd5a7c7aecc, entries=3, sequenceid=29, filesize=5.2 K 2024-11-11T04:34:05,590 DEBUG [M:0;a7bef91497aa:37677 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/767bb16bf7a14707877d3279285a6439 as hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/767bb16bf7a14707877d3279285a6439 2024-11-11T04:34:05,595 INFO [M:0;a7bef91497aa:37677 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/767bb16bf7a14707877d3279285a6439, entries=1, sequenceid=29, filesize=5.0 K 2024-11-11T04:34:05,596 DEBUG [M:0;a7bef91497aa:37677 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c72f64baeda34b82960566a8a06a6200 as hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c72f64baeda34b82960566a8a06a6200 2024-11-11T04:34:05,601 INFO [M:0;a7bef91497aa:37677 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46393/user/jenkins/test-data/344a5da6-cec7-65a9-f501-7f0adca996b8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c72f64baeda34b82960566a8a06a6200, entries=1, sequenceid=29, filesize=4.9 K 2024-11-11T04:34:05,603 INFO [M:0;a7bef91497aa:37677 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=29, compaction requested=false 2024-11-11T04:34:05,604 INFO [M:0;a7bef91497aa:37677 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:05,605 DEBUG [M:0;a7bef91497aa:37677 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731299645465Disabling compacts and flushes for region at 1731299645465Disabling writes for close at 1731299645465Obtaining lock to block concurrent updates at 1731299645465Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731299645465Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731299645465Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731299645466 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731299645466Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731299645482 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731299645482Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731299645494 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731299645509 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731299645509Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731299645521 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731299645536 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731299645536Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731299645548 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731299645563 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731299645563Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a7b974e: reopening flushed file at 1731299645574 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c01ccf3: reopening flushed file at 1731299645582 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cfd6b47: reopening flushed file at 1731299645589 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b8c0de7: reopening flushed file at 1731299645595 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=29, compaction requested=false at 1731299645603 (+8 ms)Writing region close event to WAL at 1731299645604 (+1 ms)Closed at 1731299645604 2024-11-11T04:34:05,605 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:05,605 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:05,605 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:05,605 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:05,606 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:05,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39507 is added to blk_1073741830_1006 (size=10311) 2024-11-11T04:34:05,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33219 is added to blk_1073741830_1006 (size=10311) 2024-11-11T04:34:05,609 INFO [M:0;a7bef91497aa:37677 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T04:34:05,609 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T04:34:05,609 INFO [M:0;a7bef91497aa:37677 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37677 2024-11-11T04:34:05,609 INFO [M:0;a7bef91497aa:37677 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T04:34:05,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:34:05,711 INFO [M:0;a7bef91497aa:37677 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T04:34:05,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37677-0x101959b88990000, quorum=127.0.0.1:56036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:34:05,714 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@824b6ae{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:05,714 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d7e5c06{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:34:05,714 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:34:05,715 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@376d199b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:34:05,715 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b44e274{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/hadoop.log.dir/,STOPPED} 2024-11-11T04:34:05,716 WARN [BP-2046443487-172.17.0.2-1731299643524 heartbeating to localhost/127.0.0.1:46393 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:34:05,716 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T04:34:05,716 WARN [BP-2046443487-172.17.0.2-1731299643524 heartbeating to localhost/127.0.0.1:46393 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2046443487-172.17.0.2-1731299643524 (Datanode Uuid 7dcc82e1-75bc-4495-93cd-7c3343eff527) service to localhost/127.0.0.1:46393 2024-11-11T04:34:05,716 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:34:05,717 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/cluster_a0ee29d1-e567-7146-d056-b0bb08b62ec4/data/data3/current/BP-2046443487-172.17.0.2-1731299643524 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:05,717 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/cluster_a0ee29d1-e567-7146-d056-b0bb08b62ec4/data/data4/current/BP-2046443487-172.17.0.2-1731299643524 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:05,717 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:34:05,719 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d4bdc00{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:05,720 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@687b21ce{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:34:05,720 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:34:05,720 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@276f8783{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:34:05,720 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4edee9ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/hadoop.log.dir/,STOPPED} 2024-11-11T04:34:05,721 WARN [BP-2046443487-172.17.0.2-1731299643524 heartbeating to localhost/127.0.0.1:46393 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:34:05,721 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
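
Shortly after this point the utility starts a fresh mini cluster with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, ...} (see the "Starting up minicluster" entry that follows). As a rough sketch of how such an option object is built, assuming the builder methods match the field names printed in that entry:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class RestartMiniCluster {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Field names mirror the StartMiniClusterOption toString() seen in the log.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .build();
    util.startMiniCluster(option);
    try {
      // test body would go here
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```
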
2024-11-11T04:34:05,721 WARN [BP-2046443487-172.17.0.2-1731299643524 heartbeating to localhost/127.0.0.1:46393 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2046443487-172.17.0.2-1731299643524 (Datanode Uuid 767a5fae-bde5-4727-a5e9-998f1b06de97) service to localhost/127.0.0.1:46393 2024-11-11T04:34:05,721 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:34:05,722 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/cluster_a0ee29d1-e567-7146-d056-b0bb08b62ec4/data/data1/current/BP-2046443487-172.17.0.2-1731299643524 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:05,722 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/cluster_a0ee29d1-e567-7146-d056-b0bb08b62ec4/data/data2/current/BP-2046443487-172.17.0.2-1731299643524 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:05,722 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:34:05,728 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@511dc70f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T04:34:05,728 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e469283{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:34:05,728 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:34:05,728 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45e3157d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:34:05,729 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3197ca45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/hadoop.log.dir/,STOPPED} 2024-11-11T04:34:05,735 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T04:34:05,751 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T04:34:05,751 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T04:34:05,752 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/hadoop.log.dir so I do NOT create it in target/test-data/d549239d-bab0-c761-e004-d9edc084ad03 2024-11-11T04:34:05,752 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5264082c-f18d-0f85-f514-d1881f058d17/hadoop.tmp.dir so I do NOT create it in target/test-data/d549239d-bab0-c761-e004-d9edc084ad03 2024-11-11T04:34:05,752 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817, deleteOnExit=true 2024-11-11T04:34:05,752 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T04:34:05,752 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/test.cache.data in system properties and HBase conf 2024-11-11T04:34:05,752 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T04:34:05,752 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.log.dir in system properties and HBase conf 2024-11-11T04:34:05,752 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T04:34:05,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T04:34:05,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T04:34:05,753 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-11T04:34:05,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T04:34:05,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T04:34:05,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T04:34:05,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T04:34:05,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T04:34:05,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T04:34:05,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T04:34:05,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T04:34:05,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T04:34:05,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/nfs.dump.dir in system properties and HBase conf 2024-11-11T04:34:05,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/java.io.tmpdir in system properties and HBase conf 2024-11-11T04:34:05,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T04:34:05,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T04:34:05,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T04:34:05,768 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T04:34:05,847 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:34:05,855 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:34:05,856 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:34:05,856 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:34:05,857 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:34:05,858 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:34:05,858 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a488aac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:34:05,859 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ed3a961{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:34:05,970 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7cd2a640{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/java.io.tmpdir/jetty-localhost-34327-hadoop-hdfs-3_4_1-tests_jar-_-any-6558415602688492720/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T04:34:05,970 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64b7b556{HTTP/1.1, (http/1.1)}{localhost:34327} 2024-11-11T04:34:05,970 INFO [Time-limited test {}] server.Server(415): Started @103773ms 2024-11-11T04:34:05,984 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T04:34:06,054 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:34:06,057 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:34:06,058 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:34:06,058 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:34:06,058 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:34:06,059 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@257cf4bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:34:06,059 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30e7c448{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:34:06,171 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@ab5393f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/java.io.tmpdir/jetty-localhost-38939-hadoop-hdfs-3_4_1-tests_jar-_-any-5849816767544389318/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:06,172 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47f82e76{HTTP/1.1, (http/1.1)}{localhost:38939} 2024-11-11T04:34:06,172 INFO [Time-limited test {}] server.Server(415): Started @103974ms 2024-11-11T04:34:06,174 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:34:06,205 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:34:06,208 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:34:06,209 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:34:06,209 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:34:06,209 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T04:34:06,209 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69bbaec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:34:06,210 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7720beab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:34:06,269 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data2/current/BP-454738336-172.17.0.2-1731299645786/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:06,269 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data1/current/BP-454738336-172.17.0.2-1731299645786/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:06,286 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:34:06,290 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xba366cc466e085a with lease ID 0x57a3c7b2bc056b1c: Processing first storage report for DS-a4611af3-51d9-4d80-83b3-808bf9f260d5 from datanode DatanodeRegistration(127.0.0.1:46201, datanodeUuid=9239dcf8-2eea-4d80-baa3-fdf988b27043, infoPort=44551, infoSecurePort=0, ipcPort=42071, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786) 2024-11-11T04:34:06,290 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xba366cc466e085a with lease ID 0x57a3c7b2bc056b1c: from storage DS-a4611af3-51d9-4d80-83b3-808bf9f260d5 node DatanodeRegistration(127.0.0.1:46201, datanodeUuid=9239dcf8-2eea-4d80-baa3-fdf988b27043, infoPort=44551, infoSecurePort=0, ipcPort=42071, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T04:34:06,290 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xba366cc466e085a with lease ID 0x57a3c7b2bc056b1c: Processing first storage report for DS-48a7e775-890a-4b6a-ae39-6cfb340b542e from datanode DatanodeRegistration(127.0.0.1:46201, datanodeUuid=9239dcf8-2eea-4d80-baa3-fdf988b27043, infoPort=44551, infoSecurePort=0, ipcPort=42071, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786) 2024-11-11T04:34:06,290 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xba366cc466e085a with lease ID 0x57a3c7b2bc056b1c: from storage DS-48a7e775-890a-4b6a-ae39-6cfb340b542e node DatanodeRegistration(127.0.0.1:46201, datanodeUuid=9239dcf8-2eea-4d80-baa3-fdf988b27043, infoPort=44551, infoSecurePort=0, ipcPort=42071, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:34:06,325 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@21d5e4af{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/java.io.tmpdir/jetty-localhost-40127-hadoop-hdfs-3_4_1-tests_jar-_-any-8454903266022219112/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:06,325 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@145c0180{HTTP/1.1, (http/1.1)}{localhost:40127} 2024-11-11T04:34:06,325 INFO [Time-limited test {}] server.Server(415): Started @104127ms 2024-11-11T04:34:06,327 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
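The shutdown and restart recorded above is driven by the test utility named in these entries. As a rough sketch only (the package location of HBaseTestingUtil and the exact StartMiniClusterOption builder methods are assumed from the log's own toString output, not verified against a specific HBase release), a test producing this sequence would typically look like:

    import org.apache.hadoop.hbase.HBaseTestingUtil;       // class name from the log; package assumed
    import org.apache.hadoop.hbase.StartMiniClusterOption;  // option type printed in the log

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();

        // Mirror the option the log prints: 1 master, 1 region server, 2 datanodes, 1 ZK server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();

        // Presumably corresponds to the "Starting up minicluster with option: ..." entry:
        // spins up mini DFS, mini ZooKeeper, then the HBase master and region server.
        util.startMiniCluster(option);
        try {
          // ... test body would go here ...
        } finally {
          // Presumably corresponds to the "Minicluster is down" entry above.
          util.shutdownMiniCluster();
        }
      }
    }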
2024-11-11T04:34:06,431 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data3/current/BP-454738336-172.17.0.2-1731299645786/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:06,431 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data4/current/BP-454738336-172.17.0.2-1731299645786/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:06,448 WARN [Thread-672 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T04:34:06,451 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc726d3c72e88bc4e with lease ID 0x57a3c7b2bc056b1d: Processing first storage report for DS-0b19750a-ee85-4865-8aca-62398e1e42a1 from datanode DatanodeRegistration(127.0.0.1:40723, datanodeUuid=9cbb48e6-377e-490c-b963-e735d7cbd305, infoPort=39329, infoSecurePort=0, ipcPort=40181, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786) 2024-11-11T04:34:06,451 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc726d3c72e88bc4e with lease ID 0x57a3c7b2bc056b1d: from storage DS-0b19750a-ee85-4865-8aca-62398e1e42a1 node DatanodeRegistration(127.0.0.1:40723, datanodeUuid=9cbb48e6-377e-490c-b963-e735d7cbd305, infoPort=39329, infoSecurePort=0, ipcPort=40181, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:34:06,451 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc726d3c72e88bc4e with lease ID 0x57a3c7b2bc056b1d: Processing first storage report for DS-43895f34-7d23-4f68-a6f8-18b02b211e48 from datanode DatanodeRegistration(127.0.0.1:40723, datanodeUuid=9cbb48e6-377e-490c-b963-e735d7cbd305, infoPort=39329, infoSecurePort=0, ipcPort=40181, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786) 2024-11-11T04:34:06,451 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc726d3c72e88bc4e with lease ID 0x57a3c7b2bc056b1d: from storage DS-43895f34-7d23-4f68-a6f8-18b02b211e48 node DatanodeRegistration(127.0.0.1:40723, datanodeUuid=9cbb48e6-377e-490c-b963-e735d7cbd305, infoPort=39329, infoSecurePort=0, ipcPort=40181, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:34:06,453 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03 2024-11-11T04:34:06,456 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/zookeeper_0, clientPort=51151, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T04:34:06,457 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51151 2024-11-11T04:34:06,457 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:06,459 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:06,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46201 is added to blk_1073741825_1001 (size=7) 2024-11-11T04:34:06,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40723 is added to blk_1073741825_1001 (size=7) 2024-11-11T04:34:06,470 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094 with version=8 2024-11-11T04:34:06,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/hbase-staging 2024-11-11T04:34:06,472 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a7bef91497aa:0 server-side Connection retries=45 2024-11-11T04:34:06,473 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:06,473 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:06,473 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:34:06,473 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:06,473 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:34:06,473 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T04:34:06,473 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:34:06,474 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34363 2024-11-11T04:34:06,475 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34363 connecting to ZooKeeper ensemble=127.0.0.1:51151 2024-11-11T04:34:06,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:343630x0, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:34:06,482 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34363-0x101959b91600000 connected 2024-11-11T04:34:06,501 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:06,503 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:06,505 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:34:06,505 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094, hbase.cluster.distributed=false 2024-11-11T04:34:06,507 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:34:06,507 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34363 2024-11-11T04:34:06,507 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34363 2024-11-11T04:34:06,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34363 2024-11-11T04:34:06,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34363 2024-11-11T04:34:06,509 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34363 2024-11-11T04:34:06,525 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a7bef91497aa:0 server-side Connection retries=45 2024-11-11T04:34:06,525 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:06,525 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:06,525 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:34:06,525 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:06,525 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:34:06,525 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T04:34:06,525 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:34:06,526 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38817 2024-11-11T04:34:06,527 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38817 connecting to ZooKeeper ensemble=127.0.0.1:51151 2024-11-11T04:34:06,528 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:06,530 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:06,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:388170x0, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:34:06,535 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:388170x0, quorum=127.0.0.1:51151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:34:06,535 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38817-0x101959b91600001 connected 2024-11-11T04:34:06,535 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T04:34:06,536 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T04:34:06,537 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T04:34:06,538 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:34:06,539 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38817 2024-11-11T04:34:06,539 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38817 2024-11-11T04:34:06,539 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38817 2024-11-11T04:34:06,540 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38817 2024-11-11T04:34:06,544 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38817 2024-11-11T04:34:06,563 
DEBUG [M:0;a7bef91497aa:34363 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a7bef91497aa:34363 2024-11-11T04:34:06,563 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a7bef91497aa,34363,1731299646472 2024-11-11T04:34:06,564 INFO [regionserver/a7bef91497aa:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:34:06,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:34:06,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:34:06,565 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a7bef91497aa,34363,1731299646472 2024-11-11T04:34:06,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T04:34:06,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:06,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:06,567 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T04:34:06,568 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a7bef91497aa,34363,1731299646472 from backup master directory 2024-11-11T04:34:06,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a7bef91497aa,34363,1731299646472 2024-11-11T04:34:06,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:34:06,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:34:06,569 WARN [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
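The ZKWatcher entries above are ordinary ZooKeeper watch notifications. A minimal sketch with the stock ZooKeeper client shows how the same SyncConnected / NodeCreated events are delivered; the connect string 127.0.0.1:51151 and the /hbase/master and /hbase/running paths are taken from the log, and nothing here is HBase-specific code:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51151", 30000, new Watcher() {
          @Override
          public void process(WatchedEvent event) {
            // Events arrive in the same shape as the ZKWatcher lines above:
            // type=None/state=SyncConnected on connect, then NodeCreated etc.
            // as znodes such as /hbase/master appear.
            System.out.println("type=" + event.getType()
                + ", state=" + event.getState()
                + ", path=" + event.getPath());
          }
        });

        // Setting a watch on a znode that may not exist yet mirrors the
        // "Set watcher on znode that does not yet exist" entries above.
        zk.exists("/hbase/master", true);
        zk.exists("/hbase/running", true);

        Thread.sleep(10_000); // keep the session open long enough to observe events
        zk.close();
      }
    }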
2024-11-11T04:34:06,569 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a7bef91497aa,34363,1731299646472 2024-11-11T04:34:06,574 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/hbase.id] with ID: 259e6d66-ff6e-4e2e-8e48-556b15d17a54 2024-11-11T04:34:06,575 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/.tmp/hbase.id 2024-11-11T04:34:06,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46201 is added to blk_1073741826_1002 (size=42) 2024-11-11T04:34:06,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40723 is added to blk_1073741826_1002 (size=42) 2024-11-11T04:34:06,583 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/.tmp/hbase.id]:[hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/hbase.id] 2024-11-11T04:34:06,597 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:06,597 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T04:34:06,599 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
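The cluster ID handling above (write to .tmp/hbase.id, then move it to its target location) is the usual write-then-rename publish pattern on HDFS. The hedged sketch below uses the plain Hadoop FileSystem API; the NameNode port, paths and ID value are copied from the log, while the file content is a plain-text placeholder because the log does not show how hbase.id is actually serialized:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdPublishSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new java.net.URI("hdfs://localhost:41327"), conf);

        Path root = new Path("/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094");
        Path tmp = new Path(root, ".tmp/hbase.id");
        Path target = new Path(root, "hbase.id");

        // 1. Write the new content to a temporary file first ...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("259e6d66-ff6e-4e2e-8e48-556b15d17a54".getBytes(StandardCharsets.UTF_8));
        }
        // 2. ... then publish it with a rename, so readers never see a half-written hbase.id.
        if (!fs.rename(tmp, target)) {
          throw new java.io.IOException("rename " + tmp + " -> " + target + " failed");
        }
        fs.close();
      }
    }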
2024-11-11T04:34:06,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:06,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:06,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40723 is added to blk_1073741827_1003 (size=196) 2024-11-11T04:34:06,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46201 is added to blk_1073741827_1003 (size=196) 2024-11-11T04:34:06,609 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T04:34:06,610 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T04:34:06,610 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:34:06,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40723 is added to blk_1073741828_1004 (size=1189) 2024-11-11T04:34:06,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46201 is added to blk_1073741828_1004 (size=1189) 2024-11-11T04:34:06,621 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store 2024-11-11T04:34:06,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46201 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:34:06,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40723 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:34:06,629 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:34:06,629 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T04:34:06,629 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:06,629 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:06,629 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T04:34:06,629 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:06,629 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
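The 'master:store' descriptor printed above can be rebuilt with the public descriptor builders, as a way of reading the attribute dump. The sketch below mirrors the printed values for the 'info' family and leaves 'proc', 'rs' and 'state' at builder defaults, which correspond to the values shown (1 version, ROW bloom filter, no encoding, 64 KB blocks); it only builds and prints the descriptor and does not create anything:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        // 'info' family as printed above: 3 versions, ROWCOL bloom filter,
        // ROW_INDEX_V1 encoding, in-memory, 8 KB blocks.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build())
            // Remaining families rely on defaults matching the printed attributes.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
        System.out.println(td);
      }
    }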
2024-11-11T04:34:06,630 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731299646629Disabling compacts and flushes for region at 1731299646629Disabling writes for close at 1731299646629Writing region close event to WAL at 1731299646629Closed at 1731299646629 2024-11-11T04:34:06,630 WARN [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/.initializing 2024-11-11T04:34:06,631 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/WALs/a7bef91497aa,34363,1731299646472 2024-11-11T04:34:06,634 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C34363%2C1731299646472, suffix=, logDir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/WALs/a7bef91497aa,34363,1731299646472, archiveDir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/oldWALs, maxLogs=10 2024-11-11T04:34:06,634 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C34363%2C1731299646472.1731299646634 2024-11-11T04:34:06,639 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/WALs/a7bef91497aa,34363,1731299646472/a7bef91497aa%2C34363%2C1731299646472.1731299646634 2024-11-11T04:34:06,643 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39329:39329),(127.0.0.1/127.0.0.1:44551:44551)] 2024-11-11T04:34:06,644 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:34:06,644 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:34:06,645 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:06,645 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:06,646 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:06,648 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T04:34:06,648 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:06,648 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:06,648 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:06,650 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T04:34:06,650 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:06,650 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:34:06,650 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:06,652 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T04:34:06,652 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:06,652 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:34:06,652 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:06,653 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T04:34:06,654 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:06,654 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:34:06,654 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:06,655 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:06,656 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:06,657 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:06,657 DEBUG [master/a7bef91497aa:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:06,658 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T04:34:06,659 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:06,661 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:34:06,662 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=729984, jitterRate=-0.0717778354883194}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T04:34:06,663 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731299646645Initializing all the Stores at 1731299646646 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299646646Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299646646Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299646646Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299646646Cleaning up temporary data from old regions at 1731299646657 (+11 ms)Region opened successfully at 1731299646663 (+6 ms) 2024-11-11T04:34:06,663 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T04:34:06,667 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7466233f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7bef91497aa/172.17.0.2:0 2024-11-11T04:34:06,668 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T04:34:06,668 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T04:34:06,668 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T04:34:06,668 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T04:34:06,669 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T04:34:06,669 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T04:34:06,669 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T04:34:06,672 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T04:34:06,673 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T04:34:06,674 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T04:34:06,674 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T04:34:06,675 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T04:34:06,676 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T04:34:06,676 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T04:34:06,677 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T04:34:06,679 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T04:34:06,679 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T04:34:06,681 DEBUG 
[master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T04:34:06,683 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T04:34:06,684 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T04:34:06,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:34:06,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:34:06,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:06,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:06,686 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a7bef91497aa,34363,1731299646472, sessionid=0x101959b91600000, setting cluster-up flag (Was=false) 2024-11-11T04:34:06,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:06,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:06,695 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T04:34:06,696 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7bef91497aa,34363,1731299646472 2024-11-11T04:34:06,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:06,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:06,704 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T04:34:06,705 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7bef91497aa,34363,1731299646472 2024-11-11T04:34:06,707 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T04:34:06,709 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T04:34:06,709 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T04:34:06,710 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T04:34:06,710 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a7bef91497aa,34363,1731299646472 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T04:34:06,711 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:34:06,711 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:34:06,711 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:34:06,711 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:34:06,711 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a7bef91497aa:0, corePoolSize=10, maxPoolSize=10 2024-11-11T04:34:06,711 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:06,711 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a7bef91497aa:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:34:06,712 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a7bef91497aa:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T04:34:06,712 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731299676712 2024-11-11T04:34:06,713 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T04:34:06,713 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T04:34:06,713 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T04:34:06,713 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T04:34:06,713 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T04:34:06,713 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T04:34:06,713 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:06,714 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T04:34:06,714 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:34:06,714 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T04:34:06,714 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T04:34:06,714 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T04:34:06,714 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T04:34:06,714 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T04:34:06,714 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299646714,5,FailOnTimeoutGroup] 2024-11-11T04:34:06,715 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299646715,5,FailOnTimeoutGroup] 2024-11-11T04:34:06,715 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:06,715 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T04:34:06,715 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:06,715 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:06,715 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:06,715 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T04:34:06,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46201 is added to blk_1073741831_1007 (size=1321) 2024-11-11T04:34:06,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40723 is added to blk_1073741831_1007 (size=1321) 2024-11-11T04:34:06,726 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T04:34:06,726 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094 2024-11-11T04:34:06,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46201 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:34:06,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40723 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:34:06,733 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:34:06,734 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:34:06,736 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:34:06,736 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:06,736 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:06,736 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T04:34:06,738 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T04:34:06,738 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:06,738 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:06,738 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:34:06,740 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:34:06,740 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:06,740 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:06,740 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T04:34:06,741 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:34:06,742 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:06,742 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:06,742 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T04:34:06,743 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740 2024-11-11T04:34:06,743 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740 2024-11-11T04:34:06,745 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T04:34:06,745 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T04:34:06,745 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-11T04:34:06,746 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.HRegionServer(746): ClusterId : 259e6d66-ff6e-4e2e-8e48-556b15d17a54 2024-11-11T04:34:06,746 DEBUG [RS:0;a7bef91497aa:38817 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T04:34:06,747 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T04:34:06,749 DEBUG [RS:0;a7bef91497aa:38817 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T04:34:06,749 DEBUG [RS:0;a7bef91497aa:38817 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T04:34:06,749 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:34:06,749 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=693640, jitterRate=-0.11799150705337524}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T04:34:06,750 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731299646733Initializing all the Stores at 1731299646734 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299646734Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299646734Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299646734Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299646734Cleaning up temporary data from old regions at 1731299646745 (+11 ms)Region opened successfully at 1731299646750 (+5 ms) 2024-11-11T04:34:06,750 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:34:06,750 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T04:34:06,750 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T04:34:06,751 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:34:06,751 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:34:06,751 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T04:34:06,751 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731299646750Disabling compacts and flushes for region at 1731299646750Disabling writes for close at 1731299646751 (+1 ms)Writing region close event to WAL at 1731299646751Closed at 1731299646751 2024-11-11T04:34:06,752 DEBUG [RS:0;a7bef91497aa:38817 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T04:34:06,752 DEBUG [RS:0;a7bef91497aa:38817 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24dd87fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7bef91497aa/172.17.0.2:0 2024-11-11T04:34:06,752 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:34:06,753 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T04:34:06,753 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T04:34:06,754 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T04:34:06,756 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T04:34:06,765 DEBUG [RS:0;a7bef91497aa:38817 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a7bef91497aa:38817 2024-11-11T04:34:06,765 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T04:34:06,765 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T04:34:06,765 DEBUG [RS:0;a7bef91497aa:38817 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-11T04:34:06,766 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.HRegionServer(2659): reportForDuty to master=a7bef91497aa,34363,1731299646472 with port=38817, startcode=1731299646525 2024-11-11T04:34:06,766 DEBUG [RS:0;a7bef91497aa:38817 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T04:34:06,769 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59673, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T04:34:06,769 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34363 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a7bef91497aa,38817,1731299646525 2024-11-11T04:34:06,769 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34363 {}] master.ServerManager(517): Registering regionserver=a7bef91497aa,38817,1731299646525 2024-11-11T04:34:06,771 DEBUG [RS:0;a7bef91497aa:38817 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094 2024-11-11T04:34:06,771 DEBUG [RS:0;a7bef91497aa:38817 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41327 2024-11-11T04:34:06,771 DEBUG [RS:0;a7bef91497aa:38817 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T04:34:06,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:34:06,775 DEBUG [RS:0;a7bef91497aa:38817 {}] zookeeper.ZKUtil(111): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a7bef91497aa,38817,1731299646525 2024-11-11T04:34:06,775 WARN [RS:0;a7bef91497aa:38817 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T04:34:06,775 INFO [RS:0;a7bef91497aa:38817 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:34:06,775 DEBUG [RS:0;a7bef91497aa:38817 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525 2024-11-11T04:34:06,775 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a7bef91497aa,38817,1731299646525] 2024-11-11T04:34:06,778 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T04:34:06,807 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T04:34:06,808 INFO [RS:0;a7bef91497aa:38817 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T04:34:06,808 INFO [RS:0;a7bef91497aa:38817 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-11T04:34:06,808 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T04:34:06,809 INFO [RS:0;a7bef91497aa:38817 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T04:34:06,809 INFO [RS:0;a7bef91497aa:38817 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:06,809 DEBUG [RS:0;a7bef91497aa:38817 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:06,809 DEBUG [RS:0;a7bef91497aa:38817 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:06,809 DEBUG [RS:0;a7bef91497aa:38817 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:06,809 DEBUG [RS:0;a7bef91497aa:38817 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:06,809 DEBUG [RS:0;a7bef91497aa:38817 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:06,809 DEBUG [RS:0;a7bef91497aa:38817 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a7bef91497aa:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:34:06,809 DEBUG [RS:0;a7bef91497aa:38817 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:06,809 DEBUG [RS:0;a7bef91497aa:38817 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:06,810 DEBUG [RS:0;a7bef91497aa:38817 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:06,810 DEBUG [RS:0;a7bef91497aa:38817 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:06,810 DEBUG [RS:0;a7bef91497aa:38817 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:06,810 DEBUG [RS:0;a7bef91497aa:38817 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:06,810 DEBUG [RS:0;a7bef91497aa:38817 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a7bef91497aa:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:34:06,810 DEBUG [RS:0;a7bef91497aa:38817 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:34:06,810 INFO [RS:0;a7bef91497aa:38817 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-11T04:34:06,810 INFO [RS:0;a7bef91497aa:38817 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:06,810 INFO [RS:0;a7bef91497aa:38817 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:06,810 INFO [RS:0;a7bef91497aa:38817 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:06,810 INFO [RS:0;a7bef91497aa:38817 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:06,810 INFO [RS:0;a7bef91497aa:38817 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,38817,1731299646525-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:34:06,826 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T04:34:06,826 INFO [RS:0;a7bef91497aa:38817 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,38817,1731299646525-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:06,826 INFO [RS:0;a7bef91497aa:38817 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:06,826 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.Replication(171): a7bef91497aa,38817,1731299646525 started 2024-11-11T04:34:06,839 INFO [RS:0;a7bef91497aa:38817 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:06,839 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.HRegionServer(1482): Serving as a7bef91497aa,38817,1731299646525, RpcServer on a7bef91497aa/172.17.0.2:38817, sessionid=0x101959b91600001 2024-11-11T04:34:06,840 DEBUG [RS:0;a7bef91497aa:38817 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T04:34:06,840 DEBUG [RS:0;a7bef91497aa:38817 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a7bef91497aa,38817,1731299646525 2024-11-11T04:34:06,840 DEBUG [RS:0;a7bef91497aa:38817 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7bef91497aa,38817,1731299646525' 2024-11-11T04:34:06,840 DEBUG [RS:0;a7bef91497aa:38817 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T04:34:06,840 DEBUG [RS:0;a7bef91497aa:38817 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T04:34:06,841 DEBUG [RS:0;a7bef91497aa:38817 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T04:34:06,841 DEBUG [RS:0;a7bef91497aa:38817 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T04:34:06,841 DEBUG [RS:0;a7bef91497aa:38817 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a7bef91497aa,38817,1731299646525 2024-11-11T04:34:06,841 DEBUG [RS:0;a7bef91497aa:38817 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7bef91497aa,38817,1731299646525' 2024-11-11T04:34:06,841 DEBUG [RS:0;a7bef91497aa:38817 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T04:34:06,841 DEBUG 
[RS:0;a7bef91497aa:38817 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T04:34:06,842 DEBUG [RS:0;a7bef91497aa:38817 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T04:34:06,842 INFO [RS:0;a7bef91497aa:38817 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T04:34:06,842 INFO [RS:0;a7bef91497aa:38817 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T04:34:06,906 WARN [a7bef91497aa:34363 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-11T04:34:06,944 INFO [RS:0;a7bef91497aa:38817 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C38817%2C1731299646525, suffix=, logDir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525, archiveDir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/oldWALs, maxLogs=32 2024-11-11T04:34:06,945 INFO [RS:0;a7bef91497aa:38817 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C38817%2C1731299646525.1731299646945 2024-11-11T04:34:06,951 INFO [RS:0;a7bef91497aa:38817 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299646945 2024-11-11T04:34:06,958 DEBUG [RS:0;a7bef91497aa:38817 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44551:44551),(127.0.0.1/127.0.0.1:39329:39329)] 2024-11-11T04:34:07,156 DEBUG [a7bef91497aa:34363 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T04:34:07,157 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a7bef91497aa,38817,1731299646525 2024-11-11T04:34:07,158 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7bef91497aa,38817,1731299646525, state=OPENING 2024-11-11T04:34:07,161 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T04:34:07,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:07,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:07,165 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T04:34:07,165 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:34:07,165 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:34:07,165 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a7bef91497aa,38817,1731299646525}] 2024-11-11T04:34:07,318 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T04:34:07,321 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60317, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T04:34:07,324 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T04:34:07,325 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:34:07,326 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C38817%2C1731299646525.meta, suffix=.meta, logDir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525, archiveDir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/oldWALs, maxLogs=32 2024-11-11T04:34:07,327 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta 2024-11-11T04:34:07,332 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta 2024-11-11T04:34:07,333 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39329:39329),(127.0.0.1/127.0.0.1:44551:44551)] 2024-11-11T04:34:07,333 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:34:07,334 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T04:34:07,334 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T04:34:07,334 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-11T04:34:07,334 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T04:34:07,334 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:34:07,334 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T04:34:07,334 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T04:34:07,336 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:34:07,336 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:34:07,337 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:07,337 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:07,337 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T04:34:07,338 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T04:34:07,338 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:07,339 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:07,339 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:34:07,340 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:34:07,340 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:07,340 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:07,340 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T04:34:07,341 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:34:07,341 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:07,342 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-11T04:34:07,342 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T04:34:07,342 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740 2024-11-11T04:34:07,343 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740 2024-11-11T04:34:07,345 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T04:34:07,345 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T04:34:07,345 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T04:34:07,347 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T04:34:07,347 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689400, jitterRate=-0.1233830600976944}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T04:34:07,347 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T04:34:07,348 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731299647334Writing region info on filesystem at 1731299647334Initializing all the Stores at 1731299647335 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299647335Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299647335Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299647335Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299647335Cleaning up temporary data from old regions at 1731299647345 (+10 ms)Running coprocessor post-open hooks at 1731299647347 (+2 ms)Region opened successfully at 1731299647348 (+1 ms) 2024-11-11T04:34:07,349 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731299647318 2024-11-11T04:34:07,352 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T04:34:07,352 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T04:34:07,353 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a7bef91497aa,38817,1731299646525 2024-11-11T04:34:07,354 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7bef91497aa,38817,1731299646525, state=OPEN 2024-11-11T04:34:07,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:34:07,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:34:07,359 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a7bef91497aa,38817,1731299646525 2024-11-11T04:34:07,359 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:34:07,359 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:34:07,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T04:34:07,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a7bef91497aa,38817,1731299646525 in 194 msec 2024-11-11T04:34:07,365 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T04:34:07,365 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 609 msec 2024-11-11T04:34:07,366 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:34:07,366 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T04:34:07,367 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T04:34:07,367 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7bef91497aa,38817,1731299646525, seqNum=-1] 2024-11-11T04:34:07,367 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:34:07,369 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44415, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:34:07,374 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 665 msec 2024-11-11T04:34:07,374 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731299647374, completionTime=-1 2024-11-11T04:34:07,375 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T04:34:07,375 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T04:34:07,376 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T04:34:07,376 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731299707376 2024-11-11T04:34:07,376 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731299767376 2024-11-11T04:34:07,376 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-11T04:34:07,377 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,34363,1731299646472-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:07,377 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,34363,1731299646472-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:07,377 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,34363,1731299646472-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:07,377 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a7bef91497aa:34363, period=300000, unit=MILLISECONDS is enabled. 
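The InitMetaProcedure lines above report the creation of the 'default' and 'hbase' namespaces. As a hedged illustration (not part of this test's source), a client can confirm both system namespaces exist through the standard Admin API; the Connection is assumed to come from the surrounding HBaseTestingUtil setup.

```java
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class NamespaceCheck {
  // Returns true once both namespaces created by InitMetaProcedure are visible.
  static boolean defaultNamespacesPresent(Connection conn) throws java.io.IOException {
    try (Admin admin = conn.getAdmin()) {
      boolean sawDefault = false, sawHbase = false;
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        sawDefault |= "default".equals(ns.getName());
        sawHbase |= "hbase".equals(ns.getName());
      }
      return sawDefault && sawHbase;
    }
  }
}
```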
2024-11-11T04:34:07,377 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:07,377 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:07,379 DEBUG [master/a7bef91497aa:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T04:34:07,381 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.812sec 2024-11-11T04:34:07,381 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T04:34:07,381 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T04:34:07,381 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T04:34:07,381 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T04:34:07,381 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T04:34:07,381 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,34363,1731299646472-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:34:07,381 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,34363,1731299646472-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T04:34:07,383 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T04:34:07,384 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T04:34:07,384 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,34363,1731299646472-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
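At this point the log shows the active master finishing startup ("Master has completed initialization 0.812sec") with its chores enabled. A minimal sketch of the test-side call that produces this whole startup sequence, assuming the HBaseTestingUtil class named later in the log; method names follow the public testing-utility API and may differ slightly between HBase versions.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterBootstrap {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil testUtil = new HBaseTestingUtil();
    // Brings up in-process ZooKeeper, HDFS, one master and one region server;
    // the DEBUG/INFO output around this point of the log is emitted during this call.
    testUtil.startMiniCluster();
    try {
      // ... test body would go here ...
    } finally {
      testUtil.shutdownMiniCluster(); // tears everything back down
    }
  }
}
```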
2024-11-11T04:34:07,446 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3efce601, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:34:07,446 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a7bef91497aa,34363,-1 for getting cluster id 2024-11-11T04:34:07,447 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T04:34:07,448 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '259e6d66-ff6e-4e2e-8e48-556b15d17a54' 2024-11-11T04:34:07,449 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T04:34:07,449 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "259e6d66-ff6e-4e2e-8e48-556b15d17a54" 2024-11-11T04:34:07,449 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d239e3c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:34:07,449 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a7bef91497aa,34363,-1] 2024-11-11T04:34:07,449 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T04:34:07,450 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:07,451 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41534, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T04:34:07,452 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67c279b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:34:07,452 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T04:34:07,453 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7bef91497aa,38817,1731299646525, seqNum=-1] 2024-11-11T04:34:07,454 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:34:07,455 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58832, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:34:07,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-11T04:34:07,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster 
is up; activeMaster=a7bef91497aa,34363,1731299646472 2024-11-11T04:34:07,457 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:07,460 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T04:34:07,475 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a7bef91497aa:0 server-side Connection retries=45 2024-11-11T04:34:07,475 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:07,475 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:07,476 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:34:07,476 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:07,476 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:34:07,476 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T04:34:07,476 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:34:07,476 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40761 2024-11-11T04:34:07,478 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40761 connecting to ZooKeeper ensemble=127.0.0.1:51151 2024-11-11T04:34:07,478 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:07,480 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:07,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:407610x0, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:34:07,484 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40761-0x101959b91600002 connected 2024-11-11T04:34:07,484 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:40761-0x101959b91600002, quorum=127.0.0.1:51151, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-11T04:34:07,484 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-11T04:34:07,485 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating 
BlockCache size=880 MB, blockSize=64 KB 2024-11-11T04:34:07,488 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T04:34:07,489 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:40761-0x101959b91600002, quorum=127.0.0.1:51151, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T04:34:07,490 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40761-0x101959b91600002, quorum=127.0.0.1:51151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:34:07,491 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40761 2024-11-11T04:34:07,491 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40761 2024-11-11T04:34:07,491 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40761 2024-11-11T04:34:07,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40761 2024-11-11T04:34:07,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40761 2024-11-11T04:34:07,494 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.HRegionServer(746): ClusterId : 259e6d66-ff6e-4e2e-8e48-556b15d17a54 2024-11-11T04:34:07,494 DEBUG [RS:1;a7bef91497aa:40761 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T04:34:07,495 DEBUG [RS:1;a7bef91497aa:40761 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T04:34:07,495 DEBUG [RS:1;a7bef91497aa:40761 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T04:34:07,497 DEBUG [RS:1;a7bef91497aa:40761 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T04:34:07,498 DEBUG [RS:1;a7bef91497aa:40761 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ecfc2ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7bef91497aa/172.17.0.2:0 2024-11-11T04:34:07,509 DEBUG [RS:1;a7bef91497aa:40761 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;a7bef91497aa:40761 2024-11-11T04:34:07,509 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T04:34:07,509 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T04:34:07,509 DEBUG [RS:1;a7bef91497aa:40761 {}] regionserver.HRegionServer(832): About to register with Master. 
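The "Minicluster is up" line and the ClusterIdFetcher/ConnectionRegistry traffic just before it correspond to a client Connection being created against the running cluster, and "set balanceSwitch=false" indicates the balancer is then switched off. A hedged sketch of those two client calls follows; variable and method names outside the HBase API are illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientSetup {
  static Connection connectAndDisableBalancer(Configuration conf) throws java.io.IOException {
    // Creating the Connection triggers the cluster-id and meta-location lookups
    // visible in the log above.
    Connection conn = ConnectionFactory.createConnection(conf);
    try (Admin admin = conn.getAdmin()) {
      // Matches "Client=... set balanceSwitch=false"; the second argument
      // requests a synchronous switch.
      admin.balancerSwitch(false, true);
    }
    return conn;
  }
}
```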
2024-11-11T04:34:07,510 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.HRegionServer(2659): reportForDuty to master=a7bef91497aa,34363,1731299646472 with port=40761, startcode=1731299647475 2024-11-11T04:34:07,510 DEBUG [RS:1;a7bef91497aa:40761 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T04:34:07,511 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35227, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T04:34:07,512 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34363 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a7bef91497aa,40761,1731299647475 2024-11-11T04:34:07,512 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34363 {}] master.ServerManager(517): Registering regionserver=a7bef91497aa,40761,1731299647475 2024-11-11T04:34:07,513 DEBUG [RS:1;a7bef91497aa:40761 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094 2024-11-11T04:34:07,514 DEBUG [RS:1;a7bef91497aa:40761 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41327 2024-11-11T04:34:07,514 DEBUG [RS:1;a7bef91497aa:40761 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T04:34:07,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:34:07,515 DEBUG [RS:1;a7bef91497aa:40761 {}] zookeeper.ZKUtil(111): regionserver:40761-0x101959b91600002, quorum=127.0.0.1:51151, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a7bef91497aa,40761,1731299647475 2024-11-11T04:34:07,515 WARN [RS:1;a7bef91497aa:40761 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T04:34:07,516 INFO [RS:1;a7bef91497aa:40761 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:34:07,516 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a7bef91497aa,40761,1731299647475] 2024-11-11T04:34:07,516 DEBUG [RS:1;a7bef91497aa:40761 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475 2024-11-11T04:34:07,519 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T04:34:07,521 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T04:34:07,524 INFO [RS:1;a7bef91497aa:40761 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T04:34:07,524 INFO [RS:1;a7bef91497aa:40761 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
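The WALFactory line above shows RS:1 instantiating the filesystem-based FSHLogProvider. Purely as a hedged configuration sketch: the key/value pair below is the standard way to select that provider, but whether this test sets it explicitly or simply relies on the default is an assumption.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderConfig {
  static Configuration withFsHLog() {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" maps to org.apache.hadoop.hbase.wal.FSHLogProvider,
    // the provider named in the WALFactory log line above.
    conf.set("hbase.wal.provider", "filesystem");
    return conf;
  }
}
```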
2024-11-11T04:34:07,524 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T04:34:07,525 INFO [RS:1;a7bef91497aa:40761 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T04:34:07,525 INFO [RS:1;a7bef91497aa:40761 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:07,525 DEBUG [RS:1;a7bef91497aa:40761 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:07,525 DEBUG [RS:1;a7bef91497aa:40761 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:07,525 DEBUG [RS:1;a7bef91497aa:40761 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:07,525 DEBUG [RS:1;a7bef91497aa:40761 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:07,525 DEBUG [RS:1;a7bef91497aa:40761 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:07,525 DEBUG [RS:1;a7bef91497aa:40761 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a7bef91497aa:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:34:07,525 DEBUG [RS:1;a7bef91497aa:40761 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:07,525 DEBUG [RS:1;a7bef91497aa:40761 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:07,526 DEBUG [RS:1;a7bef91497aa:40761 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:07,526 DEBUG [RS:1;a7bef91497aa:40761 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:07,526 DEBUG [RS:1;a7bef91497aa:40761 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:07,526 DEBUG [RS:1;a7bef91497aa:40761 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:07,526 DEBUG [RS:1;a7bef91497aa:40761 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a7bef91497aa:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:34:07,526 DEBUG [RS:1;a7bef91497aa:40761 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:34:07,526 INFO [RS:1;a7bef91497aa:40761 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-11T04:34:07,526 INFO [RS:1;a7bef91497aa:40761 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:07,526 INFO [RS:1;a7bef91497aa:40761 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:07,526 INFO [RS:1;a7bef91497aa:40761 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:07,526 INFO [RS:1;a7bef91497aa:40761 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:07,526 INFO [RS:1;a7bef91497aa:40761 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,40761,1731299647475-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:34:07,541 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T04:34:07,541 INFO [RS:1;a7bef91497aa:40761 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,40761,1731299647475-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:07,541 INFO [RS:1;a7bef91497aa:40761 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:07,541 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.Replication(171): a7bef91497aa,40761,1731299647475 started 2024-11-11T04:34:07,554 INFO [RS:1;a7bef91497aa:40761 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:07,555 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.HRegionServer(1482): Serving as a7bef91497aa,40761,1731299647475, RpcServer on a7bef91497aa/172.17.0.2:40761, sessionid=0x101959b91600002 2024-11-11T04:34:07,555 DEBUG [RS:1;a7bef91497aa:40761 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T04:34:07,555 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;a7bef91497aa:40761,5,FailOnTimeoutGroup] 2024-11-11T04:34:07,555 DEBUG [RS:1;a7bef91497aa:40761 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a7bef91497aa,40761,1731299647475 2024-11-11T04:34:07,555 DEBUG [RS:1;a7bef91497aa:40761 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7bef91497aa,40761,1731299647475' 2024-11-11T04:34:07,555 DEBUG [RS:1;a7bef91497aa:40761 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T04:34:07,555 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-11T04:34:07,555 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T04:34:07,555 DEBUG [RS:1;a7bef91497aa:40761 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T04:34:07,556 DEBUG [RS:1;a7bef91497aa:40761 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T04:34:07,556 DEBUG [RS:1;a7bef91497aa:40761 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T04:34:07,556 DEBUG [RS:1;a7bef91497aa:40761 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
a7bef91497aa,40761,1731299647475 2024-11-11T04:34:07,556 DEBUG [RS:1;a7bef91497aa:40761 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7bef91497aa,40761,1731299647475' 2024-11-11T04:34:07,556 DEBUG [RS:1;a7bef91497aa:40761 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T04:34:07,556 DEBUG [RS:1;a7bef91497aa:40761 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T04:34:07,557 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is a7bef91497aa,34363,1731299646472 2024-11-11T04:34:07,557 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1b749963 2024-11-11T04:34:07,557 DEBUG [RS:1;a7bef91497aa:40761 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T04:34:07,557 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T04:34:07,557 INFO [RS:1;a7bef91497aa:40761 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T04:34:07,557 INFO [RS:1;a7bef91497aa:40761 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T04:34:07,559 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41550, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T04:34:07,559 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:07,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34363 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T04:34:07,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34363 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
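The two TableDescriptorChecker warnings above (MAX_FILESIZE 786432, MEMSTORE_FLUSHSIZE 8192) are what the master prints when a table is created with deliberately tiny sizing, as log-rolling tests commonly do to force frequent flushes and rolls. Below is a sketch of a descriptor carrying exactly those values: the table and family names are taken from the log, the builder usage is the standard client API, and whether the real test constructs it this way is an assumption.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallRegionDescriptor {
  static TableDescriptor build() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        // Values below reproduce the two warnings logged by TableDescriptorChecker.
        .setMaxFileSize(786432L)
        .setMemStoreFlushSize(8192L)
        .build();
  }
}
```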
2024-11-11T04:34:07,560 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34363 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T04:34:07,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34363 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-11T04:34:07,563 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T04:34:07,563 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:07,563 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34363 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-11T04:34:07,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34363 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T04:34:07,565 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T04:34:07,565 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:07,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40723 is added to blk_1073741835_1011 (size=393) 2024-11-11T04:34:07,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46201 is added to blk_1073741835_1011 (size=393) 2024-11-11T04:34:07,576 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6ddada07afcf2fed42f4de9080a351bf, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094 2024-11-11T04:34:07,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40723 is added to blk_1073741836_1012 (size=76) 2024-11-11T04:34:07,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46201 is added to blk_1073741836_1012 (size=76) 2024-11-11T04:34:07,582 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:34:07,583 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 6ddada07afcf2fed42f4de9080a351bf, disabling compactions & flushes 2024-11-11T04:34:07,583 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. 2024-11-11T04:34:07,583 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. 2024-11-11T04:34:07,583 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. after waiting 0 ms 2024-11-11T04:34:07,583 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. 2024-11-11T04:34:07,583 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. 
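The CreateTableProcedure activity above (pid=4 plus the RegionOpenAndInit pool creating and then closing region 6ddada07afcf2fed42f4de9080a351bf) is driven by a single synchronous Admin call on the client side. A hedged sketch, reusing the admin and descriptor from the earlier sketches:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class CreateTestTable {
  static void create(Admin admin, TableDescriptor descriptor) throws IOException {
    // Blocks until the master reports the procedure complete, which is why the
    // log later shows repeated "Checking to see if procedure is done pid=4"
    // calls before "Operation: CREATE ... completed".
    admin.createTable(descriptor);
  }
}
```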
2024-11-11T04:34:07,583 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6ddada07afcf2fed42f4de9080a351bf: Waiting for close lock at 1731299647582Disabling compacts and flushes for region at 1731299647582Disabling writes for close at 1731299647583 (+1 ms)Writing region close event to WAL at 1731299647583Closed at 1731299647583 2024-11-11T04:34:07,584 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T04:34:07,585 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731299647585"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731299647585"}]},"ts":"1731299647585"} 2024-11-11T04:34:07,588 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-11T04:34:07,589 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T04:34:07,589 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731299647589"}]},"ts":"1731299647589"} 2024-11-11T04:34:07,591 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-11T04:34:07,591 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6ddada07afcf2fed42f4de9080a351bf, ASSIGN}] 2024-11-11T04:34:07,593 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6ddada07afcf2fed42f4de9080a351bf, ASSIGN 2024-11-11T04:34:07,594 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6ddada07afcf2fed42f4de9080a351bf, ASSIGN; state=OFFLINE, location=a7bef91497aa,38817,1731299646525; forceNewPlan=false, retain=false 2024-11-11T04:34:07,659 INFO [RS:1;a7bef91497aa:40761 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C40761%2C1731299647475, suffix=, logDir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475, archiveDir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/oldWALs, maxLogs=32 2024-11-11T04:34:07,660 INFO [RS:1;a7bef91497aa:40761 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C40761%2C1731299647475.1731299647660 2024-11-11T04:34:07,666 INFO [RS:1;a7bef91497aa:40761 {}] 
wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 2024-11-11T04:34:07,667 DEBUG [RS:1;a7bef91497aa:40761 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39329:39329),(127.0.0.1/127.0.0.1:44551:44551)] 2024-11-11T04:34:07,745 INFO [a7bef91497aa:34363 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-11T04:34:07,745 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6ddada07afcf2fed42f4de9080a351bf, regionState=OPENING, regionLocation=a7bef91497aa,38817,1731299646525 2024-11-11T04:34:07,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6ddada07afcf2fed42f4de9080a351bf, ASSIGN because future has completed 2024-11-11T04:34:07,748 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6ddada07afcf2fed42f4de9080a351bf, server=a7bef91497aa,38817,1731299646525}] 2024-11-11T04:34:07,906 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. 2024-11-11T04:34:07,906 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 6ddada07afcf2fed42f4de9080a351bf, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:34:07,906 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:07,907 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:34:07,907 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:07,907 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:07,908 INFO [StoreOpener-6ddada07afcf2fed42f4de9080a351bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:07,910 INFO [StoreOpener-6ddada07afcf2fed42f4de9080a351bf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6ddada07afcf2fed42f4de9080a351bf columnFamilyName info 2024-11-11T04:34:07,910 DEBUG [StoreOpener-6ddada07afcf2fed42f4de9080a351bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:07,910 INFO [StoreOpener-6ddada07afcf2fed42f4de9080a351bf-1 {}] regionserver.HStore(327): Store=6ddada07afcf2fed42f4de9080a351bf/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:34:07,910 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:07,911 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:07,911 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:07,912 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:07,912 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:07,913 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:07,915 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:34:07,916 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 6ddada07afcf2fed42f4de9080a351bf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=688435, jitterRate=-0.12461031973361969}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T04:34:07,916 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, 
pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:07,916 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 6ddada07afcf2fed42f4de9080a351bf: Running coprocessor pre-open hook at 1731299647907Writing region info on filesystem at 1731299647907Initializing all the Stores at 1731299647908 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299647908Cleaning up temporary data from old regions at 1731299647912 (+4 ms)Running coprocessor post-open hooks at 1731299647916 (+4 ms)Region opened successfully at 1731299647916 2024-11-11T04:34:07,917 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf., pid=6, masterSystemTime=1731299647902 2024-11-11T04:34:07,920 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. 2024-11-11T04:34:07,920 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. 
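Once the region open journal above is written and AssignRegionHandler reports the region opened, a test can resolve the table's single region and confirm it landed on the server named in the log (a7bef91497aa,38817). A hedged sketch using the standard RegionLocator API; the helper name is illustrative.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateTestRegion {
  static HRegionLocation locateOnlyRegion(Connection conn) throws IOException {
    TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      // Empty start row resolves to the first (and, for this table, only) region;
      // 'true' forces a fresh lookup instead of using the client-side cache.
      return locator.getRegionLocation(Bytes.toBytes(""), true);
    }
  }
}
```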
2024-11-11T04:34:07,921 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6ddada07afcf2fed42f4de9080a351bf, regionState=OPEN, openSeqNum=2, regionLocation=a7bef91497aa,38817,1731299646525 2024-11-11T04:34:07,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6ddada07afcf2fed42f4de9080a351bf, server=a7bef91497aa,38817,1731299646525 because future has completed 2024-11-11T04:34:07,928 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T04:34:07,928 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 6ddada07afcf2fed42f4de9080a351bf, server=a7bef91497aa,38817,1731299646525 in 177 msec 2024-11-11T04:34:07,930 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T04:34:07,931 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6ddada07afcf2fed42f4de9080a351bf, ASSIGN in 337 msec 2024-11-11T04:34:07,932 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T04:34:07,932 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731299647932"}]},"ts":"1731299647932"} 2024-11-11T04:34:07,934 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-11T04:34:07,935 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T04:34:07,937 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 375 msec 2024-11-11T04:34:08,079 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T04:34:08,080 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:08,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:08,113 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:08,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:12,959 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T04:34:12,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:12,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:12,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:12,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:12,987 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-11T04:34:17,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-11T04:34:17,457 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-11T04:34:17,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34363 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T04:34:17,655 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-11T04:34:17,656 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-11T04:34:17,659 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-11T04:34:17,659 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. 2024-11-11T04:34:17,672 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:34:17,675 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:34:17,675 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:34:17,676 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:34:17,676 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:34:17,676 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75255721{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:34:17,677 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@132d95f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:34:17,787 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@65266587{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/java.io.tmpdir/jetty-localhost-39737-hadoop-hdfs-3_4_1-tests_jar-_-any-10277197862406763229/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:17,787 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e157477{HTTP/1.1, (http/1.1)}{localhost:39737} 2024-11-11T04:34:17,787 INFO [Time-limited test {}] server.Server(415): Started @115589ms 2024-11-11T04:34:17,789 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:34:17,822 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:34:17,825 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:34:17,825 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:34:17,825 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:34:17,826 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:34:17,826 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fbc343d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:34:17,826 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3986ff43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:34:17,881 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data5/current/BP-454738336-172.17.0.2-1731299645786/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:17,881 WARN [Thread-830 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data6/current/BP-454738336-172.17.0.2-1731299645786/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:17,900 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:34:17,902 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5334d8b5284f3766 with lease ID 0x57a3c7b2bc056b1e: Processing first storage report for DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46 from datanode DatanodeRegistration(127.0.0.1:32857, datanodeUuid=06000495-a633-489f-b061-5f3717d95432, infoPort=36347, infoSecurePort=0, ipcPort=32795, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786) 2024-11-11T04:34:17,902 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5334d8b5284f3766 with lease ID 0x57a3c7b2bc056b1e: from storage DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46 node DatanodeRegistration(127.0.0.1:32857, datanodeUuid=06000495-a633-489f-b061-5f3717d95432, infoPort=36347, infoSecurePort=0, ipcPort=32795, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:34:17,902 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5334d8b5284f3766 with lease ID 0x57a3c7b2bc056b1e: Processing first storage report for DS-16f082f2-2252-4d18-a32c-c97f31ef5141 from datanode DatanodeRegistration(127.0.0.1:32857, datanodeUuid=06000495-a633-489f-b061-5f3717d95432, infoPort=36347, infoSecurePort=0, ipcPort=32795, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786) 2024-11-11T04:34:17,902 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5334d8b5284f3766 with lease ID 0x57a3c7b2bc056b1e: from storage DS-16f082f2-2252-4d18-a32c-c97f31ef5141 node DatanodeRegistration(127.0.0.1:32857, datanodeUuid=06000495-a633-489f-b061-5f3717d95432, infoPort=36347, infoSecurePort=0, ipcPort=32795, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:34:17,945 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@458e5bcf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/java.io.tmpdir/jetty-localhost-33437-hadoop-hdfs-3_4_1-tests_jar-_-any-12285888673496569984/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:17,946 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6b75ba45{HTTP/1.1, (http/1.1)}{localhost:33437} 2024-11-11T04:34:17,946 INFO [Time-limited test {}] server.Server(415): Started @115748ms 2024-11-11T04:34:17,948 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:34:17,984 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:34:17,987 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:34:17,988 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:34:17,988 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:34:17,988 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:34:17,989 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@495a6aea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:34:17,989 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15751333{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:34:18,041 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8/current/BP-454738336-172.17.0.2-1731299645786/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:18,041 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7/current/BP-454738336-172.17.0.2-1731299645786/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:18,058 WARN [Thread-844 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:34:18,060 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa31fda4b20c512af with lease ID 0x57a3c7b2bc056b1f: Processing first storage report for DS-96e399a0-103e-4b17-ba00-79e62af7c104 from datanode DatanodeRegistration(127.0.0.1:45993, datanodeUuid=c22f2d70-f590-45e9-9ca1-b877c52e1b27, infoPort=43605, infoSecurePort=0, ipcPort=39039, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786) 2024-11-11T04:34:18,061 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa31fda4b20c512af with lease ID 0x57a3c7b2bc056b1f: from storage DS-96e399a0-103e-4b17-ba00-79e62af7c104 node DatanodeRegistration(127.0.0.1:45993, datanodeUuid=c22f2d70-f590-45e9-9ca1-b877c52e1b27, infoPort=43605, infoSecurePort=0, ipcPort=39039, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:34:18,061 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa31fda4b20c512af with lease ID 0x57a3c7b2bc056b1f: Processing first storage report for DS-5b568df4-9932-466d-b94d-f019a32a8719 from datanode DatanodeRegistration(127.0.0.1:45993, datanodeUuid=c22f2d70-f590-45e9-9ca1-b877c52e1b27, infoPort=43605, infoSecurePort=0, ipcPort=39039, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786) 2024-11-11T04:34:18,061 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa31fda4b20c512af with lease ID 0x57a3c7b2bc056b1f: from storage DS-5b568df4-9932-466d-b94d-f019a32a8719 node DatanodeRegistration(127.0.0.1:45993, datanodeUuid=c22f2d70-f590-45e9-9ca1-b877c52e1b27, infoPort=43605, infoSecurePort=0, ipcPort=39039, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:34:18,104 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47d01054{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/java.io.tmpdir/jetty-localhost-34483-hadoop-hdfs-3_4_1-tests_jar-_-any-5606138312580851829/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:18,105 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17900346{HTTP/1.1, (http/1.1)}{localhost:34483} 2024-11-11T04:34:18,105 INFO [Time-limited test {}] server.Server(415): Started @115907ms 2024-11-11T04:34:18,106 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
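
The repeated "Unable to initialize FileSignerSecretProvider, falling back to use random secrets" and "dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec" warnings above are environment/configuration noise from the mini cluster, not test failures. A minimal sketch of how a test setup could avoid both, assuming the Hadoop property name hadoop.http.authentication.signature.secret.file (verify against your Hadoop version) and a writable temp directory; illustrative only, not the test's actual setup code.

    import java.nio.file.Files;
    import java.nio.file.Path;
    import org.apache.hadoop.conf.Configuration;

    final class MiniClusterConfSketch {
      // Illustrative only: pre-create the HTTP-auth signature secret file and keep the
      // directory-scanner throttle within its valid range so the warnings above do not appear.
      static Configuration quieterConf() throws Exception {
        Path secret = Files.createTempFile("hadoop-http-auth-signature-secret", "");
        Files.write(secret, "test-only-secret".getBytes("UTF-8"));
        Configuration conf = new Configuration();
        // Property name assumed from Hadoop's HTTP authentication documentation.
        conf.set("hadoop.http.authentication.signature.secret.file", secret.toString());
        // The DirectoryScanner warning says values above 1000 ms/sec fall back to -1 (disabled).
        conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 1000);
        return conf;
      }
    }
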
2024-11-11T04:34:18,197 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data9/current/BP-454738336-172.17.0.2-1731299645786/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:18,197 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data10/current/BP-454738336-172.17.0.2-1731299645786/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:18,213 WARN [Thread-879 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T04:34:18,216 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x69bce84fd8f2b017 with lease ID 0x57a3c7b2bc056b20: Processing first storage report for DS-786c7f68-99f1-418b-b63e-c44144d0edb8 from datanode DatanodeRegistration(127.0.0.1:37241, datanodeUuid=fd7fc765-9cae-4915-88bd-d4d392096b05, infoPort=35903, infoSecurePort=0, ipcPort=36703, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786) 2024-11-11T04:34:18,216 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x69bce84fd8f2b017 with lease ID 0x57a3c7b2bc056b20: from storage DS-786c7f68-99f1-418b-b63e-c44144d0edb8 node DatanodeRegistration(127.0.0.1:37241, datanodeUuid=fd7fc765-9cae-4915-88bd-d4d392096b05, infoPort=35903, infoSecurePort=0, ipcPort=36703, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T04:34:18,216 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x69bce84fd8f2b017 with lease ID 0x57a3c7b2bc056b20: Processing first storage report for DS-9bf45274-0945-40fc-bdcd-d9d60c43ab14 from datanode DatanodeRegistration(127.0.0.1:37241, datanodeUuid=fd7fc765-9cae-4915-88bd-d4d392096b05, infoPort=35903, infoSecurePort=0, ipcPort=36703, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786) 2024-11-11T04:34:18,216 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x69bce84fd8f2b017 with lease ID 0x57a3c7b2bc056b20: from storage DS-9bf45274-0945-40fc-bdcd-d9d60c43ab14 node DatanodeRegistration(127.0.0.1:37241, datanodeUuid=fd7fc765-9cae-4915-88bd-d4d392096b05, infoPort=35903, infoSecurePort=0, ipcPort=36703, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:34:18,228 WARN [ResponseProcessor for block BP-454738336-172.17.0.2-1731299645786:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-454738336-172.17.0.2-1731299645786:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:18,228 WARN [ResponseProcessor for block BP-454738336-172.17.0.2-1731299645786:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-454738336-172.17.0.2-1731299645786:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:18,228 WARN [ResponseProcessor for block BP-454738336-172.17.0.2-1731299645786:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-454738336-172.17.0.2-1731299645786:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:18,228 WARN [DataStreamer for file /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/WALs/a7bef91497aa,34363,1731299646472/a7bef91497aa%2C34363%2C1731299646472.1731299646634 block BP-454738336-172.17.0.2-1731299645786:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK], DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]) is bad. 2024-11-11T04:34:18,228 WARN [DataStreamer for file /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 block BP-454738336-172.17.0.2-1731299645786:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK], DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]) is bad. 2024-11-11T04:34:18,229 WARN [DataStreamer for file /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta block BP-454738336-172.17.0.2-1731299645786:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK], DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]) is bad. 
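
The burst of ResponseProcessor EOFExceptions and "datanode 0 (...) is bad" error-recovery messages above is the HDFS client reacting to a datanode vanishing from open write pipelines, which is exactly the situation testLogRollOnDatanodeDeath exercises. A minimal sketch of how a test can provoke this against a MiniDFSCluster; the helper name is hypothetical and this is not necessarily how the HBase test triggers it.

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    final class DatanodeDeathSketch {
      // Stopping one datanode while WAL streams are open forces every pipeline that
      // included it through the error-recovery path logged above.
      static void killOneDataNode(MiniDFSCluster dfsCluster) {
        dfsCluster.stopDataNode(0); // index of the datanode to stop
      }
    }
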
2024-11-11T04:34:18,229 WARN [ResponseProcessor for block BP-454738336-172.17.0.2-1731299645786:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-454738336-172.17.0.2-1731299645786:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-454738336-172.17.0.2-1731299645786:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:18,229 WARN [DataStreamer for file /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299646945 block BP-454738336-172.17.0.2-1731299645786:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK], DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]) is bad. 2024-11-11T04:34:18,229 WARN [PacketResponder: BP-454738336-172.17.0.2-1731299645786:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40723] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:18,229 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2086005195_22 at /127.0.0.1:52840 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:40723:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52840 dst: /127.0.0.1:40723 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:18,230 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@21d5e4af{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:18,230 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:42244 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46201:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42244 dst: /127.0.0.1:46201 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:18,230 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2086005195_22 at /127.0.0.1:42290 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:46201:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42290 dst: /127.0.0.1:46201 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:18,230 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:42260 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46201:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42260 dst: /127.0.0.1:46201 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:18,230 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:52810 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40723:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52810 dst: /127.0.0.1:40723 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:18,231 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@145c0180{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:34:18,231 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_517426701_22 at /127.0.0.1:52764 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40723:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52764 dst: /127.0.0.1:40723 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:18,231 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:34:18,231 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7720beab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:34:18,231 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:52794 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40723:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52794 dst: /127.0.0.1:40723 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:18,231 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_517426701_22 at /127.0.0.1:42222 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46201:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42222 dst: /127.0.0.1:46201 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
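
On the datanode side, the DataXceiver "error processing WRITE_BLOCK" and PacketResponder errors above are the receiving ends of those same broken pipelines. From the writer's point of view the pattern is simply an open stream that is written and flushed repeatedly, as in this hedged sketch (illustrative class and method names, not code from the test):

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class PipelineWriteSketch {
      // A WAL-style write: append a record and hflush it through the datanode pipeline.
      // If a pipeline datanode dies between flushes, the client logs the EOF/connection-reset
      // warnings seen earlier and the surviving datanodes log the premature-EOF errors above.
      static void appendAndSync(FileSystem fs, Path file, byte[] record) throws Exception {
        try (FSDataOutputStream out = fs.create(file, true)) {
          out.write(record);
          out.hflush();
        }
      }
    }
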
2024-11-11T04:34:18,232 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69bbaec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.log.dir/,STOPPED} 2024-11-11T04:34:18,233 WARN [BP-454738336-172.17.0.2-1731299645786 heartbeating to localhost/127.0.0.1:41327 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:34:18,233 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T04:34:18,233 WARN [BP-454738336-172.17.0.2-1731299645786 heartbeating to localhost/127.0.0.1:41327 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-454738336-172.17.0.2-1731299645786 (Datanode Uuid 9cbb48e6-377e-490c-b963-e735d7cbd305) service to localhost/127.0.0.1:41327 2024-11-11T04:34:18,233 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:34:18,234 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data3/current/BP-454738336-172.17.0.2-1731299645786 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:18,234 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data4/current/BP-454738336-172.17.0.2-1731299645786 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:18,235 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:34:18,235 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@36273239 {}] datanode.DataXceiver(331): 127.0.0.1:46201:DataXceiver error processing unknown operation src: /127.0.0.1:53018 dst: /127.0.0.1:46201 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:18,235 WARN [DataStreamer for file /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299646945 block BP-454738336-172.17.0.2-1731299645786:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:18,235 WARN [DataStreamer for file /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta block BP-454738336-172.17.0.2-1731299645786:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:18,235 WARN [DataStreamer for file /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 block BP-454738336-172.17.0.2-1731299645786:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:18,236 WARN [DataStreamer for file /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/WALs/a7bef91497aa,34363,1731299646472/a7bef91497aa%2C34363%2C1731299646472.1731299646634 block BP-454738336-172.17.0.2-1731299645786:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
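
The entries that follow show the recovery path on the HBase side: FSHLog reports "appendAndSync throws IOException", the log roller requests a new WAL, and RecoverLeaseFSUtils recovers the lease on the old file so it can be closed and later archived. A minimal sketch of the two public calls that correspond to those steps, assuming an Admin handle and the DistributedFileSystem/path visible in the log; it is a sketch of the mechanism, not the internal implementation.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    final class WalRecoverySketch {
      // Ask a region server to roll its WAL, the operation the "roll requested" /
      // "Rolled WAL" messages below record happening automatically.
      static void rollWal(Admin admin, ServerName regionServer) throws Exception {
        admin.rollWALWriter(regionServer);
      }

      // Recover the HDFS lease on the abandoned WAL file, retrying until the NameNode
      // reports it closed (compare the "Failed to recover lease, attempt=0" message below,
      // which is the first iteration of such a loop).
      static void recoverLease(DistributedFileSystem dfs, Path oldWal) throws Exception {
        while (!dfs.recoverLease(oldWal)) {
          Thread.sleep(1000L);
        }
      }
    }
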
2024-11-11T04:34:18,237 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@ab5393f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:18,237 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47f82e76{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:34:18,238 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:34:18,238 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30e7c448{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:34:18,238 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@257cf4bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.log.dir/,STOPPED} 2024-11-11T04:34:18,239 WARN [BP-454738336-172.17.0.2-1731299645786 heartbeating to localhost/127.0.0.1:41327 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:34:18,239 WARN [BP-454738336-172.17.0.2-1731299645786 heartbeating to localhost/127.0.0.1:41327 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-454738336-172.17.0.2-1731299645786 (Datanode Uuid 9239dcf8-2eea-4d80-baa3-fdf988b27043) service to localhost/127.0.0.1:41327 2024-11-11T04:34:18,239 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T04:34:18,239 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:34:18,239 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data1/current/BP-454738336-172.17.0.2-1731299645786 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:18,240 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data2/current/BP-454738336-172.17.0.2-1731299645786 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:18,240 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:34:18,243 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf., hostname=a7bef91497aa,38817,1731299646525, seqNum=2] 2024-11-11T04:34:18,245 ERROR [FSHLog-0-hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094-prefix:a7bef91497aa,38817,1731299646525 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:18,245 WARN [FSHLog-0-hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094-prefix:a7bef91497aa,38817,1731299646525 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:18,245 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:18,246 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7bef91497aa%2C38817%2C1731299646525:(num 1731299646945) roll requested 2024-11-11T04:34:18,246 INFO [regionserver/a7bef91497aa:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C38817%2C1731299646525.1731299658246 2024-11-11T04:34:18,249 WARN [Thread-902 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:18,249 WARN [Thread-902 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK], DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]) is bad. 2024-11-11T04:34:18,249 WARN [Thread-902 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741838_1018 2024-11-11T04:34:18,251 WARN [Thread-902 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK] 2024-11-11T04:34:18,258 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:18,258 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:18,258 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:18,258 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:18,258 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:18,259 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299646945 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299658246 2024-11-11T04:34:18,259 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:18,259 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:18,259 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35903:35903),(127.0.0.1/127.0.0.1:36347:36347)] 2024-11-11T04:34:18,259 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299646945 is not closed yet, will try archiving it next time 2024-11-11T04:34:18,260 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-11T04:34:18,261 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-11T04:34:18,261 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299646945 2024-11-11T04:34:18,264 WARN [IPC Server handler 1 on default port 41327 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299646945 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-11-11T04:34:18,268 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299646945 after 5ms 2024-11-11T04:34:18,611 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:19,527 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:20,260 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:20,261 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299658246 2024-11-11T04:34:20,262 WARN [ResponseProcessor for block BP-454738336-172.17.0.2-1731299645786:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-454738336-172.17.0.2-1731299645786:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:20,262 WARN [DataStreamer for file /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299658246 block BP-454738336-172.17.0.2-1731299645786:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK], DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]) is bad. 2024-11-11T04:34:20,262 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:55966 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:37241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55966 dst: /127.0.0.1:37241 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:20,263 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:58408 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:32857:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58408 dst: /127.0.0.1:32857 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
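The RecoverLeaseFSUtils entries above (recoverLeaseMethod set to org.apache.hadoop.fs.LeaseRecoverable.recoverLease(), then "Failed to recover lease, attempt=0 ... after 5ms", with a later retry further down after roughly four seconds) show the WAL closer polling the NameNode until the old WAL file's lease is released. A minimal sketch of that polling pattern, using only the LeaseRecoverable API named in the log; this is not HBase's actual RecoverLeaseFSUtils code, and the class name, timeout handling, and backoff values are illustrative assumptions:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LeaseRecoverable;
import org.apache.hadoop.fs.Path;

public final class WalLeaseRecoverySketch {

  /**
   * Poll lease recovery on an old WAL file until the NameNode reports it closed,
   * or until the timeout expires. Returns true once the file is closed.
   */
  public static boolean recoverWalLease(FileSystem fs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    if (!(fs instanceof LeaseRecoverable)) {
      return true; // file systems without HDFS-style leases need no recovery
    }
    LeaseRecoverable recoverable = (LeaseRecoverable) fs;
    long start = System.currentTimeMillis();
    long pauseMs = 5; // the first retry comes back almost immediately, later ones back off
    while (true) {
      // recoverLease returns true only once the file has been closed by the NameNode.
      if (recoverable.recoverLease(wal)) {
        return true;
      }
      if (System.currentTimeMillis() - start > timeoutMs) {
        return false;
      }
      Thread.sleep(pauseMs);
      pauseMs = Math.min(pauseMs * 2, 4_000L); // cap the backoff at a few seconds
    }
  }
}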
2024-11-11T04:34:20,264 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47d01054{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:20,264 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17900346{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:34:20,264 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:34:20,264 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15751333{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:34:20,264 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@495a6aea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.log.dir/,STOPPED} 2024-11-11T04:34:20,266 WARN [BP-454738336-172.17.0.2-1731299645786 heartbeating to localhost/127.0.0.1:41327 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:34:20,266 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T04:34:20,266 WARN [BP-454738336-172.17.0.2-1731299645786 heartbeating to localhost/127.0.0.1:41327 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-454738336-172.17.0.2-1731299645786 (Datanode Uuid fd7fc765-9cae-4915-88bd-d4d392096b05) service to localhost/127.0.0.1:41327 2024-11-11T04:34:20,266 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:34:20,266 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data9/current/BP-454738336-172.17.0.2-1731299645786 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:20,266 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data10/current/BP-454738336-172.17.0.2-1731299645786 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:20,267 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:34:20,611 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:21,527 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:22,260 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:22,261 WARN [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]] 2024-11-11T04:34:22,261 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7bef91497aa%2C38817%2C1731299646525:(num 1731299658246) roll requested 2024-11-11T04:34:22,261 INFO [regionserver/a7bef91497aa:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C38817%2C1731299646525.1731299662261 2024-11-11T04:34:22,265 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46201 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
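The "Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL" warning above is the log roller noticing that the WAL's write pipeline has shrunk below the tolerated minimum and asking for a roll onto a fresh file. A rough sketch of that check, assuming the WAL stream is an HdfsDataOutputStream exposing getCurrentBlockReplication(); this is illustrative and not FSHLog's actual implementation:

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

final class WalPipelineCheckSketch {

  /** True when the WAL stream's live pipeline is smaller than the tolerated minimum. */
  static boolean needsRoll(FSDataOutputStream walOut, int minReplicasTolerated) throws IOException {
    if (walOut instanceof HdfsDataOutputStream) {
      // Number of datanodes still acknowledging writes for the current WAL block.
      int liveReplicas = ((HdfsDataOutputStream) walOut).getCurrentBlockReplication();
      // "Found 1 replicas but expecting no less than 2" corresponds to 1 < 2 here.
      return liveReplicas < minReplicasTolerated;
    }
    return false; // non-HDFS streams: nothing to check
  }
}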
2024-11-11T04:34:22,265 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54692 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741840_1022 to mirror 127.0.0.1:46201 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:22,266 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]) is bad. 2024-11-11T04:34:22,266 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741840_1022 2024-11-11T04:34:22,266 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54692 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T04:34:22,266 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54692 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54692 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:22,266 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK] 2024-11-11T04:34:22,268 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299646945 after 4007ms 2024-11-11T04:34:22,270 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:22,270 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:22,270 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:22,271 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:22,271 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:22,271 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299658246 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299662261 2024-11-11T04:34:22,271 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T04:34:22,272 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36347:36347),(127.0.0.1/127.0.0.1:43605:43605)] 2024-11-11T04:34:22,272 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299646945 is not closed yet, will try archiving it next time 2024-11-11T04:34:22,272 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299658246 is not closed yet, will try archiving it next time 2024-11-11T04:34:22,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32857 is added to blk_1073741839_1021 (size=2431) 2024-11-11T04:34:22,611 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:22,674 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299646945 is not closed yet, will try archiving it next time 2024-11-11T04:34:23,527 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:24,272 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:24,275 WARN [ResponseProcessor for block BP-454738336-172.17.0.2-1731299645786:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-454738336-172.17.0.2-1731299645786:blk_1073741841_1023 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
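The recurring "All datanodes [...] are bad. Aborting..." entries come from the DFS client's pipeline recovery for the old writer: every datanode it knew for that block has failed or been excluded, so the append is abandoned. In small test-sized clusters, how aggressively the client insists on replacing failed pipeline nodes is governed by the replace-datanode-on-failure client settings; a hedged sketch of those settings follows (the values are illustrative, not what this test configures):

import org.apache.hadoop.conf.Configuration;

final class PipelineFailureConfSketch {

  /**
   * Client-side settings controlling what a DFS output stream does when a
   * datanode in its write pipeline dies. With few nodes available, a strict
   * replacement policy can leave no usable node and the write aborts.
   */
  static Configuration relaxedReplaceDatanodePolicy() {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // Policy values are ALWAYS, DEFAULT, or NEVER; NEVER keeps writing on the survivors.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    // Best-effort: if no replacement can be found, keep going instead of failing the write.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}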
2024-11-11T04:34:24,276 WARN [DataStreamer for file /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299662261 block BP-454738336-172.17.0.2-1731299645786:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]) is bad. 2024-11-11T04:34:24,276 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:44220 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:32857:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44220 dst: /127.0.0.1:32857 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T04:34:24,276 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54704 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54704 dst: /127.0.0.1:45993 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:24,277 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@65266587{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:24,278 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e157477{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:34:24,278 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:34:24,278 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@132d95f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:34:24,278 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75255721{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.log.dir/,STOPPED} 2024-11-11T04:34:24,279 WARN [BP-454738336-172.17.0.2-1731299645786 heartbeating to localhost/127.0.0.1:41327 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:34:24,279 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T04:34:24,279 WARN [BP-454738336-172.17.0.2-1731299645786 heartbeating to localhost/127.0.0.1:41327 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-454738336-172.17.0.2-1731299645786 (Datanode Uuid 06000495-a633-489f-b061-5f3717d95432) service to localhost/127.0.0.1:41327 2024-11-11T04:34:24,279 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:34:24,280 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data5/current/BP-454738336-172.17.0.2-1731299645786 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:24,280 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data6/current/BP-454738336-172.17.0.2-1731299645786 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:24,280 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:34:24,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38817 {}] regionserver.HRegion(8855): Flush requested on 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:24,288 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6ddada07afcf2fed42f4de9080a351bf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T04:34:24,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/2962c2a55e4b458e93d24dbc48216093 is 1080, key is row0002/info:/1731299660268/Put/seqid=0 2024-11-11T04:34:24,308 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54722 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741842_1025] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741842_1025 to mirror 127.0.0.1:46201 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:24,308 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46201 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:24,308 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54722 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741842_1025] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T04:34:24,308 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]) is bad. 2024-11-11T04:34:24,308 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741842_1025 2024-11-11T04:34:24,308 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54722 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54722 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:24,309 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK] 2024-11-11T04:34:24,311 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54738 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741843_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741843_1026 to mirror 127.0.0.1:40723 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:24,311 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40723 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:24,311 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54738 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741843_1026] {}] datanode.BlockReceiver(316): Block 1073741843 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-11T04:34:24,311 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]) is bad. 2024-11-11T04:34:24,311 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741843_1026 2024-11-11T04:34:24,311 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54738 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741843_1026] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54738 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:24,312 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK] 2024-11-11T04:34:24,314 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32857 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T04:34:24,314 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54750 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741844_1027 to mirror 127.0.0.1:32857 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:24,314 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]) is bad. 2024-11-11T04:34:24,314 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741844_1027 2024-11-11T04:34:24,314 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54750 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T04:34:24,314 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54750 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54750 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:24,314 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK] 2024-11-11T04:34:24,316 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:24,316 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]) is bad. 
2024-11-11T04:34:24,316 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741845_1028 2024-11-11T04:34:24,317 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK] 2024-11-11T04:34:24,317 WARN [IPC Server handler 1 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T04:34:24,317 WARN [IPC Server handler 1 on default port 41327 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T04:34:24,317 WARN [IPC Server handler 1 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T04:34:24,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741846_1029 (size=10347) 2024-11-11T04:34:24,612 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
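At this point the client has excluded every datanode except 127.0.0.1:45993, so the NameNode's placement policy cannot find a second DISK storage and logs "Failed to place enough replicas, still in need of 1 to reach 2". A small sketch of the kind of sanity check a test or client could run against the live datanode count before expecting two-replica placement to succeed; getDataNodeStats is the public DistributedFileSystem call, and the rest is an illustrative assumption:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

final class LiveDatanodeCheckSketch {

  /**
   * Report whether the cluster still has enough live datanodes to satisfy the
   * requested replication; with a single survivor, a replication factor of 2
   * cannot be met and block placement keeps failing as in the warnings above.
   */
  static boolean canPlaceReplicas(FileSystem fs, short replication) throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
      return true;
    }
    DatanodeInfo[] live = ((DistributedFileSystem) fs).getDataNodeStats(DatanodeReportType.LIVE);
    return live.length >= replication;
  }
}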
2024-11-11T04:34:24,721 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/2962c2a55e4b458e93d24dbc48216093 2024-11-11T04:34:24,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/2962c2a55e4b458e93d24dbc48216093 as hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/2962c2a55e4b458e93d24dbc48216093 2024-11-11T04:34:24,736 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/2962c2a55e4b458e93d24dbc48216093, entries=5, sequenceid=11, filesize=10.1 K 2024-11-11T04:34:24,737 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 6ddada07afcf2fed42f4de9080a351bf in 449ms, sequenceid=11, compaction requested=false 2024-11-11T04:34:24,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6ddada07afcf2fed42f4de9080a351bf: 2024-11-11T04:34:24,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38817 {}] regionserver.HRegion(8855): Flush requested on 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:24,909 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6ddada07afcf2fed42f4de9080a351bf 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-11T04:34:24,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/9536cc38e21e4b5fb7e4fe6ec9e57a23 is 1080, key is row0007/info:/1731299664289/Put/seqid=0 2024-11-11T04:34:24,915 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
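The flushed cells above have keys of the form row0002/info:/<timestamp>/Put and are roughly 1 KB each (biggest cell 1080 bytes, 5 entries per ~7.4 KB flush). Purely for illustration, a client-side write loop of this shape would produce such cells; the table name comes from the region paths in this log, while the qualifier, value size, and row range are assumptions, and this is not the test's actual code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class WriteRowsSketch {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    byte[] family = Bytes.toBytes("info");
    byte[] value = new byte[1024]; // ~1 KB per cell, in line with the 1080-byte cells above

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(tableName)) {
      for (int i = 2; i <= 6; i++) {                       // row0002..row0006 -> 5 entries flushed
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
        put.addColumn(family, Bytes.toBytes(""), value);   // empty qualifier, as in the "info:" keys
        table.put(put);
      }
    }
  }
}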
2024-11-11T04:34:24,915 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK], DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]) is bad. 2024-11-11T04:34:24,916 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741847_1030 2024-11-11T04:34:24,916 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK] 2024-11-11T04:34:24,917 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:24,917 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK], DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]) is bad. 2024-11-11T04:34:24,917 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741848_1031 2024-11-11T04:34:24,918 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK] 2024-11-11T04:34:24,919 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:24,919 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]) is bad. 2024-11-11T04:34:24,919 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741849_1032 2024-11-11T04:34:24,920 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK] 2024-11-11T04:34:24,922 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37241 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:24,921 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54772 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741850_1033 to mirror 127.0.0.1:37241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:24,922 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]) is bad. 2024-11-11T04:34:24,922 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741850_1033 2024-11-11T04:34:24,922 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54772 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T04:34:24,922 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54772 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54772 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
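The BlockPlacementPolicyDefault warnings keep repeating because the NameNode still wants two DISK replicas (replication=2) while only one datanode remains reachable. For reference only, the sketch below shows a mini-cluster configuration under which a single replica would satisfy placement; it is illustrative, presumably not what testLogRollOnDatanodeDeath wants (the test appears to keep replication at 2 so the effect of a dying datanode is observable), and it assumes the hadoop-hdfs test classes that provide MiniDFSCluster are on the classpath.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    /** Illustrative single-replica mini cluster; not the configuration used by this test. */
    public class SingleReplicaMiniCluster {
        public static MiniDFSCluster start() throws Exception {
            Configuration conf = new Configuration();
            conf.setInt("dfs.replication", 1);      // one replica per block is enough
            return new MiniDFSCluster.Builder(conf)
                .numDataNodes(1)                    // a single live datanode
                .build();
        }
    }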
2024-11-11T04:34:24,922 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK] 2024-11-11T04:34:24,923 WARN [IPC Server handler 1 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T04:34:24,923 WARN [IPC Server handler 1 on default port 41327 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T04:34:24,923 WARN [IPC Server handler 1 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T04:34:24,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741851_1034 (size=12506) 2024-11-11T04:34:24,926 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/9536cc38e21e4b5fb7e4fe6ec9e57a23 2024-11-11T04:34:24,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/9536cc38e21e4b5fb7e4fe6ec9e57a23 as hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9536cc38e21e4b5fb7e4fe6ec9e57a23 2024-11-11T04:34:24,937 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9536cc38e21e4b5fb7e4fe6ec9e57a23, entries=7, sequenceid=24, filesize=12.2 K 2024-11-11T04:34:24,938 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 6ddada07afcf2fed42f4de9080a351bf in 29ms, sequenceid=24, compaction requested=false 2024-11-11T04:34:24,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6ddada07afcf2fed42f4de9080a351bf: 2024-11-11T04:34:24,939 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-11T04:34:24,939 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:34:24,939 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9536cc38e21e4b5fb7e4fe6ec9e57a23 because midkey is the same as first or last row 2024-11-11T04:34:25,528 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:26,272 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:26,273 WARN [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK]] 2024-11-11T04:34:26,273 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7bef91497aa%2C38817%2C1731299646525:(num 1731299662261) roll requested 2024-11-11T04:34:26,273 INFO [regionserver/a7bef91497aa:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C38817%2C1731299646525.1731299666273 2024-11-11T04:34:26,276 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:26,276 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK], DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]) is bad. 2024-11-11T04:34:26,276 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741852_1035 2024-11-11T04:34:26,277 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK] 2024-11-11T04:34:26,279 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46201 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:26,279 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54790 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741853_1036 to mirror 127.0.0.1:46201 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:26,279 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]) is bad. 2024-11-11T04:34:26,280 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741853_1036 2024-11-11T04:34:26,280 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54790 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T04:34:26,280 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54790 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54790 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:26,280 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK] 2024-11-11T04:34:26,281 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:26,281 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]) is bad. 2024-11-11T04:34:26,281 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741854_1037 2024-11-11T04:34:26,282 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK] 2024-11-11T04:34:26,284 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54792 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741855_1038 to mirror 127.0.0.1:32857 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:26,284 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32857 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:26,284 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54792 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T04:34:26,284 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]) is bad. 2024-11-11T04:34:26,284 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741855_1038 2024-11-11T04:34:26,284 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54792 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54792 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
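The ConstantSizeRegionSplitPolicy / StoreUtils lines earlier in this section (sumSize=22.3 K against sizeToCheck=16.0 K, followed by "cannot split ... because midkey is the same as first or last row") describe two separate checks: a size gate that makes the region a split candidate, and a sanity check that rejects a split point which would leave one daughter empty. The stand-alone sketch below restates that logic; it is not the HBase implementation.

    import java.util.Arrays;

    /** Hedged restatement of the two split checks reported in the log above. */
    final class SplitCheckSketch {
        static boolean shouldSplit(long sumStoreFileSize, long sizeToCheck,
                                   byte[] midKey, byte[] firstKey, byte[] lastKey) {
            if (sumStoreFileSize <= sizeToCheck) {
                // Region too small to split; the log shows 22.3 K vs. 16.0 K, so this gate passes.
                return false;
            }
            boolean midEqualsFirst = Arrays.equals(midKey, firstKey);
            boolean midEqualsLast  = Arrays.equals(midKey, lastKey);
            // "cannot split ... because midkey is the same as first or last row"
            return !(midEqualsFirst || midEqualsLast);
        }
    }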
2024-11-11T04:34:26,285 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK] 2024-11-11T04:34:26,285 WARN [IPC Server handler 0 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T04:34:26,285 WARN [IPC Server handler 0 on default port 41327 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T04:34:26,285 WARN [IPC Server handler 0 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T04:34:26,287 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:26,287 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:26,288 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:26,288 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:26,288 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:26,288 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299662261 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299666273 2024-11-11T04:34:26,289 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43605:43605)] 2024-11-11T04:34:26,289 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299646945 is not closed yet, will try archiving it next time 2024-11-11T04:34:26,289 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299662261 is not closed yet, will try archiving it next time 2024-11-11T04:34:26,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741841_1024 (size=25992) 2024-11-11T04:34:26,290 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299658246 to hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/oldWALs/a7bef91497aa%2C38817%2C1731299646525.1731299658246 2024-11-11T04:34:26,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38817 {}] regionserver.HRegion(8855): Flush requested on 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:26,328 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6ddada07afcf2fed42f4de9080a351bf 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-11T04:34:26,332 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/4f9b612e2b734cddaf316fa0ebb7c9e4 is 1079, key is tmprow/info:/1731299666327/Put/seqid=0 2024-11-11T04:34:26,334 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:26,334 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK], DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]) is bad. 2024-11-11T04:34:26,334 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741857_1040 2024-11-11T04:34:26,334 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK] 2024-11-11T04:34:26,335 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:26,336 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK], DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]) is bad. 2024-11-11T04:34:26,336 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741858_1041 2024-11-11T04:34:26,336 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK] 2024-11-11T04:34:26,338 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46201 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:26,338 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54804 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741859_1042 to mirror 127.0.0.1:46201 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:26,339 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]) is bad. 2024-11-11T04:34:26,339 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54804 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T04:34:26,339 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741859_1042 2024-11-11T04:34:26,339 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54804 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54804 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:26,339 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK] 2024-11-11T04:34:26,340 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:26,340 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]) is bad. 2024-11-11T04:34:26,341 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741860_1043 2024-11-11T04:34:26,341 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK] 2024-11-11T04:34:26,342 WARN [IPC Server handler 2 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T04:34:26,342 WARN [IPC Server handler 2 on default port 41327 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T04:34:26,342 WARN [IPC Server handler 2 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T04:34:26,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741861_1044 (size=6027) 2024-11-11T04:34:26,612 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
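The FSHLog warning above ("HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL.") is the low-replication check that drives the roll requested at 04:34:26,273. A hedged sketch of that decision follows; WalWriter and LogRoller are hypothetical interfaces, not HBase classes, and the real check also rate-limits how often it may force a roll.

    /** Sketch of the low-replication WAL roll decision seen in the log above. */
    final class LowReplicationRollSketch {
        interface WalWriter {
            int currentPipelineSize();   // datanodes still acknowledging this WAL file
        }

        interface LogRoller {
            void requestRoll(String reason);
        }

        static void checkLowReplication(WalWriter writer, LogRoller roller, int minTolerable) {
            int replicas = writer.currentPipelineSize();
            if (replicas < minTolerable) {
                roller.requestRoll("Found " + replicas + " replicas but expecting no less than "
                    + minTolerable + " replicas. Requesting close of WAL.");
            }
        }
    }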
2024-11-11T04:34:26,690 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299646945 is not closed yet, will try archiving it next time 2024-11-11T04:34:26,746 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/4f9b612e2b734cddaf316fa0ebb7c9e4 2024-11-11T04:34:26,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/4f9b612e2b734cddaf316fa0ebb7c9e4 as hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/4f9b612e2b734cddaf316fa0ebb7c9e4 2024-11-11T04:34:26,758 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/4f9b612e2b734cddaf316fa0ebb7c9e4, entries=1, sequenceid=34, filesize=5.9 K 2024-11-11T04:34:26,759 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 6ddada07afcf2fed42f4de9080a351bf in 431ms, sequenceid=34, compaction requested=true 2024-11-11T04:34:26,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6ddada07afcf2fed42f4de9080a351bf: 2024-11-11T04:34:26,759 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-11T04:34:26,759 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:34:26,759 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9536cc38e21e4b5fb7e4fe6ec9e57a23 because midkey is the same as first or last row 2024-11-11T04:34:26,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ddada07afcf2fed42f4de9080a351bf:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T04:34:26,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:34:26,760 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T04:34:26,761 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T04:34:26,761 DEBUG 
[RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HStore(1541): 6ddada07afcf2fed42f4de9080a351bf/info is initiating minor compaction (all files) 2024-11-11T04:34:26,761 INFO [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6ddada07afcf2fed42f4de9080a351bf/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. 2024-11-11T04:34:26,761 INFO [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/2962c2a55e4b458e93d24dbc48216093, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9536cc38e21e4b5fb7e4fe6ec9e57a23, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/4f9b612e2b734cddaf316fa0ebb7c9e4] into tmpdir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp, totalSize=28.2 K 2024-11-11T04:34:26,761 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2962c2a55e4b458e93d24dbc48216093, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731299660268 2024-11-11T04:34:26,762 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9536cc38e21e4b5fb7e4fe6ec9e57a23, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731299664289 2024-11-11T04:34:26,762 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4f9b612e2b734cddaf316fa0ebb7c9e4, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731299666327 2024-11-11T04:34:26,775 INFO [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ddada07afcf2fed42f4de9080a351bf#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T04:34:26,776 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/9025954da55f4d128b42822fc60ea5b7 is 1080, key is row0002/info:/1731299660268/Put/seqid=0 2024-11-11T04:34:26,777 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:26,777 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK], DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]) is bad. 2024-11-11T04:34:26,777 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741862_1045 2024-11-11T04:34:26,778 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK] 2024-11-11T04:34:26,780 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46201 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:26,781 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]) is bad. 2024-11-11T04:34:26,780 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54840 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741863_1046 to mirror 127.0.0.1:46201 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:26,781 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741863_1046 2024-11-11T04:34:26,781 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54840 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T04:34:26,781 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54840 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54840 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:26,781 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK] 2024-11-11T04:34:26,782 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:26,782 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]) is bad. 2024-11-11T04:34:26,782 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741864_1047 2024-11-11T04:34:26,783 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK] 2024-11-11T04:34:26,785 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37241 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:26,785 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54844 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741865_1048 to mirror 127.0.0.1:37241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:26,785 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]) is bad. 2024-11-11T04:34:26,785 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741865_1048 2024-11-11T04:34:26,785 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54844 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T04:34:26,785 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54844 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54844 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
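Note: the entries above show a minor compaction of all three HFiles in 6ddada07afcf2fed42f4de9080a351bf/info being started and throttled while the DFS client is already failing to build write pipelines. The sketch below is only an illustration of how the same flush-then-compact activity can be requested through the public HBase Admin API; it assumes a reachable cluster and reuses the table name from the log, but it is not how the test harness itself drives the load (that part is not visible in this output).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
            // Flush memstores to HFiles; repeated small flushes produce the
            // kind of store files the minor compaction above merges.
            admin.flush(table);
            // Ask the region server to compact the table's stores. Whether
            // the selection ends up minor or major is decided server-side by
            // the compaction policy, as the HStore/Compactor lines reflect.
            admin.compact(table);
        }
    }
}
```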
2024-11-11T04:34:26,786 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK] 2024-11-11T04:34:26,786 WARN [IPC Server handler 4 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T04:34:26,786 WARN [IPC Server handler 4 on default port 41327 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T04:34:26,786 WARN [IPC Server handler 4 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T04:34:26,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741866_1049 (size=17994) 2024-11-11T04:34:27,067 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4f6f10a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45993, datanodeUuid=c22f2d70-f590-45e9-9ca1-b877c52e1b27, infoPort=43605, infoSecurePort=0, ipcPort=39039, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786):Failed to transfer BP-454738336-172.17.0.2-1731299645786:blk_1073741851_1034 to 127.0.0.1:32857 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
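Note: the IPC-server warnings above ("Failed to place enough replicas, still in need of 1 to reach 2") mean the NameNode can no longer find a second live DISK storage for new blocks, while the DataStreamer keeps abandoning blocks and excluding dead datanodes one by one. A hedged sketch of the client-side knobs involved follows: the replication factor requested at create time and the replace-datanode-on-failure policy that small test clusters usually relax. The target path /tmp/pipeline-demo and the choice of the NEVER policy are illustrative assumptions, not taken from this log; the NameNode address hdfs://localhost:41327 is.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsWriteExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // On very small clusters, replacing a failed datanode is often
        // impossible; NEVER tells the client to keep writing with the
        // surviving pipeline instead of failing the stream (assumed setting,
        // not read from this log).
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41327"), conf);
             FSDataOutputStream out = fs.create(
                 new Path("/tmp/pipeline-demo"), true, 4096,
                 (short) 2,                 // requested replication, as in the log
                 128L * 1024 * 1024)) {     // 128 MiB block size, the reservation
                                            // released in the BlockReceiver entries above
            out.writeBytes("hello");
        }
    }
}
```

With only one datanode still accepting connections, an existing stream under the default policy eventually fails the way the "All datanodes ... are bad. Aborting..." entries later in this log do.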
2024-11-11T04:34:27,067 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7026a979[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45993, datanodeUuid=c22f2d70-f590-45e9-9ca1-b877c52e1b27, infoPort=43605, infoSecurePort=0, ipcPort=39039, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786):Failed to transfer BP-454738336-172.17.0.2-1731299645786:blk_1073741846_1029 to 127.0.0.1:46201 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:27,197 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/9025954da55f4d128b42822fc60ea5b7 as hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9025954da55f4d128b42822fc60ea5b7 2024-11-11T04:34:27,203 INFO [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6ddada07afcf2fed42f4de9080a351bf/info of 6ddada07afcf2fed42f4de9080a351bf into 9025954da55f4d128b42822fc60ea5b7(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
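Note: the two DataNode$DataTransfer warnings above are the surviving datanode (127.0.0.1:45993) failing to push replicas of older blocks to peers that are no longer listening, so re-replication lags until more datanodes come back. One way to watch this from a client, sketched below under the assumption that the cluster is reachable, is to list the block locations of the freshly compacted HFile (path taken from the commit entry above) and count how many live hosts each block reports.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReplicaCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41327"), conf)) {
            // Path of the compacted HFile, copied from the log above.
            Path hfile = new Path("/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/"
                + "data/default/TestLogRolling-testLogRollOnDatanodeDeath/"
                + "6ddada07afcf2fed42f4de9080a351bf/info/9025954da55f4d128b42822fc60ea5b7");
            FileStatus st = fs.getFileStatus(hfile);
            for (BlockLocation loc : fs.getFileBlockLocations(st, 0, st.getLen())) {
                // With most datanodes down, each block lists fewer hosts than
                // the requested replication until re-replication catches up.
                System.out.println(loc.getOffset() + " -> " + String.join(",", loc.getHosts()));
            }
        }
    }
}
```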
2024-11-11T04:34:27,203 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6ddada07afcf2fed42f4de9080a351bf: 2024-11-11T04:34:27,203 INFO [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf., storeName=6ddada07afcf2fed42f4de9080a351bf/info, priority=13, startTime=1731299666759; duration=0sec 2024-11-11T04:34:27,204 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-11T04:34:27,204 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:34:27,204 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9025954da55f4d128b42822fc60ea5b7 because midkey is the same as first or last row 2024-11-11T04:34:27,204 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-11T04:34:27,204 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:34:27,204 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9025954da55f4d128b42822fc60ea5b7 because midkey is the same as first or last row 2024-11-11T04:34:27,204 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-11T04:34:27,204 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:34:27,204 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9025954da55f4d128b42822fc60ea5b7 because midkey is the same as first or last row 2024-11-11T04:34:27,204 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:34:27,204 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ddada07afcf2fed42f4de9080a351bf:info 2024-11-11T04:34:27,528 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:27,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38817 {}] regionserver.HRegion(8855): Flush requested on 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:27,745 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6ddada07afcf2fed42f4de9080a351bf 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-11T04:34:27,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/5184cac00a10495c9b0aa7d51dadd8a3 is 1079, key is tmprow/info:/1731299667744/Put/seqid=0 2024-11-11T04:34:27,751 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:27,752 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK], DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]) is bad. 2024-11-11T04:34:27,752 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741867_1050 2024-11-11T04:34:27,752 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK] 2024-11-11T04:34:27,753 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:27,754 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK], DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]) is bad. 2024-11-11T04:34:27,754 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741868_1051 2024-11-11T04:34:27,754 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK] 2024-11-11T04:34:27,755 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:27,755 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK], DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]) is bad. 2024-11-11T04:34:27,755 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741869_1052 2024-11-11T04:34:27,756 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK] 2024-11-11T04:34:27,758 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40723 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:27,758 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54852 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741870_1053 to mirror 127.0.0.1:40723 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:27,758 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]) is bad. 2024-11-11T04:34:27,758 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741870_1053 2024-11-11T04:34:27,758 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54852 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T04:34:27,758 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54852 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54852 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:27,759 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK] 2024-11-11T04:34:27,759 WARN [IPC Server handler 0 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T04:34:27,759 WARN [IPC Server handler 0 on default port 41327 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T04:34:27,759 WARN [IPC Server handler 0 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T04:34:27,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741871_1054 (size=6027) 2024-11-11T04:34:28,061 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4f6f10a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45993, datanodeUuid=c22f2d70-f590-45e9-9ca1-b877c52e1b27, infoPort=43605, infoSecurePort=0, ipcPort=39039, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786):Failed to transfer BP-454738336-172.17.0.2-1731299645786:blk_1073741861_1044 to 127.0.0.1:40723 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:28,061 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7026a979[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45993, datanodeUuid=c22f2d70-f590-45e9-9ca1-b877c52e1b27, infoPort=43605, infoSecurePort=0, ipcPort=39039, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786):Failed to transfer BP-454738336-172.17.0.2-1731299645786:blk_1073741841_1024 to 127.0.0.1:32857 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
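Note: after the compaction the split policies are consulted several times in a row: ConstantSizeRegionSplitPolicy reports the store is over the 16.0 K check size, but StoreUtils refuses to split because the midkey of 9025954da55f4d128b42822fc60ea5b7 equals the first or last row, so no split is queued. The log does not show how the test arrived at such a small threshold; the sketch below is one plausible way to configure it with TableDescriptorBuilder, and the 16 KB max file size and the explicit split-policy class are assumptions made for illustration.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TinySplitTable {
    // Builds a descriptor whose 16 KB max file size matches the
    // sizeToCheck=16.0 K threshold seen in the split-policy checks above.
    static TableDescriptor descriptor() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setMaxFileSize(16 * 1024)  // assumed: makes "should split" fire at ~16 KB
            .setRegionSplitPolicyClassName(
                "org.apache.hadoop.hbase.regionserver.IncreasingToUpperBoundRegionSplitPolicy")
            .build();
    }

    public static void main(String[] args) {
        System.out.println(descriptor());
    }
}
```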
2024-11-11T04:34:28,163 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/5184cac00a10495c9b0aa7d51dadd8a3 2024-11-11T04:34:28,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/5184cac00a10495c9b0aa7d51dadd8a3 as hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/5184cac00a10495c9b0aa7d51dadd8a3 2024-11-11T04:34:28,175 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/5184cac00a10495c9b0aa7d51dadd8a3, entries=1, sequenceid=45, filesize=5.9 K 2024-11-11T04:34:28,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 6ddada07afcf2fed42f4de9080a351bf in 431ms, sequenceid=45, compaction requested=false 2024-11-11T04:34:28,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6ddada07afcf2fed42f4de9080a351bf: 2024-11-11T04:34:28,176 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-11T04:34:28,176 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:34:28,176 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9025954da55f4d128b42822fc60ea5b7 because midkey is the same as first or last row 2024-11-11T04:34:28,289 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:28,290 WARN [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK]] 2024-11-11T04:34:28,290 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7bef91497aa%2C38817%2C1731299646525:(num 1731299666273) roll requested 2024-11-11T04:34:28,290 INFO [regionserver/a7bef91497aa:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C38817%2C1731299646525.1731299668290 2024-11-11T04:34:28,293 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:28,293 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK], DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]) is bad. 2024-11-11T04:34:28,293 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741872_1055 2024-11-11T04:34:28,293 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK] 2024-11-11T04:34:28,295 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46201 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
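Note: the logRoller entries above are the low-replication safeguard in FSHLog: the WAL pipeline is down to a single replica (127.0.0.1:45993) when at least two are expected, so a roll of a7bef91497aa%2C38817%2C1731299646525 is requested and a new writer for ...1731299668290 is being created on whatever pipeline can still be built. The same roll can be asked for explicitly through the Admin API; the sketch below assumes a reachable cluster and builds the ServerName from the host, port and start code that appear in the WAL path.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WalRollExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Server name pieces taken from the WAL directory in the log:
            // host a7bef91497aa, port 38817, start code 1731299646525.
            ServerName rs = ServerName.valueOf("a7bef91497aa", 38817, 1731299646525L);
            // Request the same WAL roll the log roller performs when it
            // detects a pipeline with fewer replicas than it tolerates.
            admin.rollWALWriter(rs);
        }
    }
}
```

The tolerable-replication threshold the roller compares against comes from the region server configuration and is not visible anywhere in this log.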
2024-11-11T04:34:28,295 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54874 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741873_1056] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741873_1056 to mirror 127.0.0.1:46201 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:28,296 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]) is bad. 2024-11-11T04:34:28,296 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741873_1056 2024-11-11T04:34:28,296 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54874 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741873_1056] {}] datanode.BlockReceiver(316): Block 1073741873 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T04:34:28,296 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54874 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741873_1056] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54874 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:28,296 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK] 2024-11-11T04:34:28,297 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:28,297 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]) is bad. 2024-11-11T04:34:28,297 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741874_1057 2024-11-11T04:34:28,298 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK] 2024-11-11T04:34:28,299 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:28,299 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]) is bad. 2024-11-11T04:34:28,299 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741875_1058 2024-11-11T04:34:28,299 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK] 2024-11-11T04:34:28,300 WARN [IPC Server handler 0 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T04:34:28,300 WARN [IPC Server handler 0 on default port 41327 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T04:34:28,300 WARN [IPC Server handler 0 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T04:34:28,302 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:28,302 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:28,302 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:28,302 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:28,302 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:28,303 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299666273 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299668290 2024-11-11T04:34:28,303 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43605:43605)] 2024-11-11T04:34:28,303 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299646945 is not closed yet, will try archiving it next time 2024-11-11T04:34:28,303 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299666273 is not closed yet, will try archiving it next time 2024-11-11T04:34:28,304 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299662261 to hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/oldWALs/a7bef91497aa%2C38817%2C1731299646525.1731299662261 2024-11-11T04:34:28,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741856_1039 (size=13591) 2024-11-11T04:34:28,612 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:28,705 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299646945 is not closed yet, will try archiving it next time 2024-11-11T04:34:29,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38817 {}] regionserver.HRegion(8855): Flush requested on 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:29,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6ddada07afcf2fed42f4de9080a351bf 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-11T04:34:29,168 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/1b989ae92692401cb4c62ca105e86bc7 is 1079, key is tmprow/info:/1731299669162/Put/seqid=0 2024-11-11T04:34:29,170 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:29,170 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK], DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]) is bad. 2024-11-11T04:34:29,170 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741877_1060 2024-11-11T04:34:29,171 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK] 2024-11-11T04:34:29,172 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:29,172 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]) is bad. 2024-11-11T04:34:29,172 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741878_1061 2024-11-11T04:34:29,173 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK] 2024-11-11T04:34:29,175 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37241 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:29,175 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54892 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741879_1062 to mirror 127.0.0.1:37241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:29,175 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]) is bad. 2024-11-11T04:34:29,175 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741879_1062 2024-11-11T04:34:29,175 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54892 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T04:34:29,175 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54892 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54892 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:29,175 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK] 2024-11-11T04:34:29,177 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54896 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741880_1063 to mirror 127.0.0.1:40723 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:29,177 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40723 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:29,177 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]) is bad. 2024-11-11T04:34:29,177 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741880_1063 2024-11-11T04:34:29,177 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54896 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T04:34:29,177 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54896 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54896 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
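
Note: the write pipeline above keeps failing to recruit a replacement datanode -- each candidate either refuses the connection outright or is reported back as firstBadLink -- so the DFSClient abandons the block and retries with a growing exclude list until it gives up with "All datanodes ... are bad". The client-side behaviour on pipeline failure is governed by the replace-datanode-on-failure properties. The following is a minimal, illustrative sketch only: the property names are standard HDFS client keys, but the values, the namenode URI copied from this log, and the /tmp path are assumptions, not recommendations.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PipelinePolicyProbe {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Whether a failed pipeline member is replaced at all, and how aggressively.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // best-effort keeps writing on the surviving datanodes when no replacement
        // can be found, instead of aborting the stream.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.best-effort", "true");
        conf.setInt("dfs.replication", 2);

        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41327"), conf);
             FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-policy-probe"))) {
          out.writeBytes("probe write through the configured pipeline policy\n");
          out.hflush(); // pushes the data through whatever pipeline is currently set up
        }
      }
    }
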
2024-11-11T04:34:29,178 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK] 2024-11-11T04:34:29,178 WARN [IPC Server handler 4 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T04:34:29,178 WARN [IPC Server handler 4 on default port 41327 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T04:34:29,179 WARN [IPC Server handler 4 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T04:34:29,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741881_1064 (size=6027) 2024-11-11T04:34:29,528 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
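
Note: the BlockPlacementPolicyDefault warnings above explicitly suggest enabling DEBUG on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology to see why replica placement keeps coming up short. Because the embedded NameNode in this mini-cluster setup runs inside the test JVM, the levels can also be raised at runtime with Log4j 2's Configurator; a small sketch (assumes log4j-core is on the classpath):

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public class EnablePlacementDebug {
      public static void main(String[] args) {
        // Raise exactly the two loggers the NameNode warning points at, so the next
        // "Failed to place enough replicas" explains which nodes were rejected and why.
        Configurator.setLevel("org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
        Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
      }
    }
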
2024-11-11T04:34:29,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/1b989ae92692401cb4c62ca105e86bc7 2024-11-11T04:34:29,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/1b989ae92692401cb4c62ca105e86bc7 as hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/1b989ae92692401cb4c62ca105e86bc7 2024-11-11T04:34:29,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/1b989ae92692401cb4c62ca105e86bc7, entries=1, sequenceid=55, filesize=5.9 K 2024-11-11T04:34:29,595 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 6ddada07afcf2fed42f4de9080a351bf in 432ms, sequenceid=55, compaction requested=true 2024-11-11T04:34:29,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6ddada07afcf2fed42f4de9080a351bf: 2024-11-11T04:34:29,595 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-11T04:34:29,595 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:34:29,595 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9025954da55f4d128b42822fc60ea5b7 because midkey is the same as first or last row 2024-11-11T04:34:29,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ddada07afcf2fed42f4de9080a351bf:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T04:34:29,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:34:29,595 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T04:34:29,596 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T04:34:29,597 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HStore(1541): 6ddada07afcf2fed42f4de9080a351bf/info is initiating minor compaction (all files) 2024-11-11T04:34:29,597 INFO [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
6ddada07afcf2fed42f4de9080a351bf/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. 2024-11-11T04:34:29,597 INFO [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9025954da55f4d128b42822fc60ea5b7, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/5184cac00a10495c9b0aa7d51dadd8a3, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/1b989ae92692401cb4c62ca105e86bc7] into tmpdir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp, totalSize=29.3 K 2024-11-11T04:34:29,597 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9025954da55f4d128b42822fc60ea5b7, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731299660268 2024-11-11T04:34:29,598 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5184cac00a10495c9b0aa7d51dadd8a3, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731299667744 2024-11-11T04:34:29,598 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1b989ae92692401cb4c62ca105e86bc7, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731299669162 2024-11-11T04:34:29,611 INFO [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ddada07afcf2fed42f4de9080a351bf#info#compaction#24 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T04:34:29,612 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/5a577459378146e6b187f01e01a550d8 is 1080, key is row0002/info:/1731299660268/Put/seqid=0 2024-11-11T04:34:29,613 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:29,614 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK], DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK]) is bad. 2024-11-11T04:34:29,614 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741882_1065 2024-11-11T04:34:29,614 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40723,DS-0b19750a-ee85-4865-8aca-62398e1e42a1,DISK] 2024-11-11T04:34:29,616 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37241 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:29,616 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54920 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741883_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741883_1066 to mirror 127.0.0.1:37241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:29,616 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]) is bad. 2024-11-11T04:34:29,616 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741883_1066 2024-11-11T04:34:29,617 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54920 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741883_1066] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T04:34:29,617 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54920 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741883_1066] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54920 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:29,617 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK] 2024-11-11T04:34:29,619 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46201 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
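
Note: this stretch -- a compaction writer on 127.0.0.1:45993 repeatedly abandoning blocks because every proposed mirror (127.0.0.1:40723, 127.0.0.1:37241, 127.0.0.1:46201, 127.0.0.1:32857) refuses the connection -- is the scenario the test provokes by stopping datanodes while writers are open. The same failure mode can be reproduced against a bare MiniDFSCluster, without HBase; the sketch below is an illustration only (it needs the hadoop-hdfs test artifact, and the path and datanode index are made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class DatanodeDeathRepro {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setInt("dfs.replication", 2);
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        try {
          cluster.waitActive();
          DistributedFileSystem fs = cluster.getFileSystem();
          try (FSDataOutputStream out = fs.create(new Path("/repro/open-writer"))) {
            out.writeBytes("first edit\n");
            out.hflush();             // a 2-node pipeline now exists for this block
            cluster.stopDataNode(0);  // stop a datanode; if it was in the pipeline,
                                      // the next flush goes through error recovery
            out.writeBytes("second edit\n");
            out.hflush();
          }
        } finally {
          cluster.shutdown();
        }
      }
    }
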
2024-11-11T04:34:29,619 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54928 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741884_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741884_1067 to mirror 127.0.0.1:46201 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:29,619 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]) is bad. 2024-11-11T04:34:29,619 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741884_1067 2024-11-11T04:34:29,619 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54928 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741884_1067] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T04:34:29,619 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:54928 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741884_1067] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54928 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:29,619 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK] 2024-11-11T04:34:29,620 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:29,620 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]) is bad. 
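
Note: the client is still being handed pipelines built from 127.0.0.1:32857, 127.0.0.1:37241, 127.0.0.1:40723 and 127.0.0.1:46201 even though all of them refuse connections, because the NameNode has not yet marked them dead -- heartbeat expiry lags the actual process death. A quick way to see the NameNode's current view is a datanode report from the client side. A small sketch, reusing the filesystem URI from this log; the report calls used here are standard DistributedFileSystem APIs, but verify the exact signatures against the Hadoop release in use:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

    public class DatanodeReport {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41327"), conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.LIVE)) {
            System.out.println("LIVE " + dn.getXferAddr()); // what the NameNode still offers to writers
          }
          for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.DEAD)) {
            System.out.println("DEAD " + dn.getXferAddr()); // datanodes whose heartbeats have expired
          }
        }
      }
    }
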
2024-11-11T04:34:29,620 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741885_1068 2024-11-11T04:34:29,621 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK] 2024-11-11T04:34:29,621 WARN [IPC Server handler 0 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T04:34:29,621 WARN [IPC Server handler 0 on default port 41327 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T04:34:29,621 WARN [IPC Server handler 0 on default port 41327 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T04:34:29,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741886_1069 (size=18097) 2024-11-11T04:34:30,031 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/5a577459378146e6b187f01e01a550d8 as hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/5a577459378146e6b187f01e01a550d8 2024-11-11T04:34:30,038 INFO [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6ddada07afcf2fed42f4de9080a351bf/info of 6ddada07afcf2fed42f4de9080a351bf into 5a577459378146e6b187f01e01a550d8(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
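
Note: despite the pipeline trouble, the compaction above still completes on the one reachable datanode: three store files of roughly 17.6 K + 5.9 K + 5.9 K (about 29.3 K) are rewritten into a single 17.7 K file. The selection step logged as "Exploring compaction algorithm has selected 3 files ..." is commonly summarised by a "files in ratio" test: no candidate file should exceed the combined size of the other candidates times a ratio (hbase.hstore.compaction.ratio, usually 1.2). The snippet below is a simplified model of that test only; the real ExploringCompactionPolicy layers min/max file counts, off-peak ratios and stuck-store fallbacks on top, so it should not be read as the exact decision taken in this run.

    import java.util.List;

    public class FilesInRatioSketch {
      // True if no single file dwarfs the rest of the candidate set.
      static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        if (fileSizes.size() < 2) {
          return true;
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Sizes approximating the three files compacted above (bytes are approximate).
        List<Long> candidate = List.of(18_022L, 6_027L, 6_027L);
        // Prints false: in this plain model the 17.6 K file is larger than 1.2x the rest.
        System.out.println(filesInRatio(candidate, 1.2));
      }
    }
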
2024-11-11T04:34:30,038 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6ddada07afcf2fed42f4de9080a351bf: 2024-11-11T04:34:30,038 INFO [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf., storeName=6ddada07afcf2fed42f4de9080a351bf/info, priority=13, startTime=1731299669595; duration=0sec 2024-11-11T04:34:30,039 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-11T04:34:30,039 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:34:30,039 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/5a577459378146e6b187f01e01a550d8 because midkey is the same as first or last row 2024-11-11T04:34:30,039 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-11T04:34:30,039 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:34:30,039 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/5a577459378146e6b187f01e01a550d8 because midkey is the same as first or last row 2024-11-11T04:34:30,039 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-11T04:34:30,039 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:34:30,039 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/5a577459378146e6b187f01e01a550d8 because midkey is the same as first or last row 2024-11-11T04:34:30,039 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:34:30,039 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ddada07afcf2fed42f4de9080a351bf:info 2024-11-11T04:34:30,061 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4f6f10a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45993, datanodeUuid=c22f2d70-f590-45e9-9ca1-b877c52e1b27, infoPort=43605, infoSecurePort=0, ipcPort=39039, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786):Failed to transfer 
BP-454738336-172.17.0.2-1731299645786:blk_1073741871_1054 to 127.0.0.1:37241 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:30,061 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7026a979[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45993, datanodeUuid=c22f2d70-f590-45e9-9ca1-b877c52e1b27, infoPort=43605, infoSecurePort=0, ipcPort=39039, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786):Failed to transfer BP-454738336-172.17.0.2-1731299645786:blk_1073741866_1049 to 127.0.0.1:40723 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:30,304 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:30,304 WARN [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-11T04:34:30,387 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:34:30,391 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:34:30,392 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:34:30,392 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:34:30,392 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:34:30,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1191c470{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:34:30,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69a3dde8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:34:30,504 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@69aa82e5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/java.io.tmpdir/jetty-localhost-37035-hadoop-hdfs-3_4_1-tests_jar-_-any-8850609121966139463/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:30,505 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6fc4b42f{HTTP/1.1, (http/1.1)}{localhost:37035} 2024-11-11T04:34:30,505 INFO [Time-limited test {}] server.Server(415): Started @128307ms 2024-11-11T04:34:30,506 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:34:30,603 WARN [Thread-993 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:34:30,611 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x89ba0aca51d960f3 with lease ID 0x57a3c7b2bc056b21: from storage DS-0b19750a-ee85-4865-8aca-62398e1e42a1 node DatanodeRegistration(127.0.0.1:35251, datanodeUuid=9cbb48e6-377e-490c-b963-e735d7cbd305, infoPort=35567, infoSecurePort=0, ipcPort=46615, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:34:30,612 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x89ba0aca51d960f3 with lease ID 0x57a3c7b2bc056b21: from storage DS-43895f34-7d23-4f68-a6f8-18b02b211e48 node DatanodeRegistration(127.0.0.1:35251, datanodeUuid=9cbb48e6-377e-490c-b963-e735d7cbd305, infoPort=35567, infoSecurePort=0, ipcPort=46615, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:34:30,613 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:31,061 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7026a979[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45993, datanodeUuid=c22f2d70-f590-45e9-9ca1-b877c52e1b27, infoPort=43605, infoSecurePort=0, ipcPort=39039, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786):Failed to transfer BP-454738336-172.17.0.2-1731299645786:blk_1073741856_1039 to 127.0.0.1:32857 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T04:34:31,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35251 is added to blk_1073741881_1064 (size=6027) 2024-11-11T04:34:31,529 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:32,304 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:32,613 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:33,061 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4f6f10a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45993, datanodeUuid=c22f2d70-f590-45e9-9ca1-b877c52e1b27, infoPort=43605, infoSecurePort=0, ipcPort=39039, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786):Failed to transfer BP-454738336-172.17.0.2-1731299645786:blk_1073741886_1069 to 127.0.0.1:37241 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:33,529 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:34,305 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:34,613 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:35,529 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:36,305 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:36,453 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T04:34:36,614 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:36,713 ERROR [FSHLog-0-hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData-prefix:a7bef91497aa,34363,1731299646472 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:36,713 WARN [FSHLog-0-hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData-prefix:a7bef91497aa,34363,1731299646472 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:36,714 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog a7bef91497aa%2C34363%2C1731299646472:(num 1731299646634) roll requested 2024-11-11T04:34:36,714 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C34363%2C1731299646472.1731299676714 2024-11-11T04:34:36,718 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37241 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:36,718 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_517426701_22 at /127.0.0.1:56234 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741887_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741887_1070 to mirror 127.0.0.1:37241 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:36,718 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK]) is bad. 2024-11-11T04:34:36,718 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_517426701_22 at /127.0.0.1:56234 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741887_1070] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T04:34:36,718 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741887_1070 2024-11-11T04:34:36,718 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_517426701_22 at /127.0.0.1:56234 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741887_1070] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56234 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:36,719 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37241,DS-786c7f68-99f1-418b-b63e-c44144d0edb8,DISK] 2024-11-11T04:34:36,721 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_517426701_22 at /127.0.0.1:56246 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741888_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741888_1071 to mirror 127.0.0.1:32857 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:36,721 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32857 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:36,721 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_517426701_22 at /127.0.0.1:56246 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741888_1071] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T04:34:36,721 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]) is bad. 2024-11-11T04:34:36,721 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741888_1071 2024-11-11T04:34:36,721 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_517426701_22 at /127.0.0.1:56246 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741888_1071] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56246 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:36,722 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK] 2024-11-11T04:34:36,725 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:36,725 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:36,726 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:36,726 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:36,726 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:36,726 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/WALs/a7bef91497aa,34363,1731299646472/a7bef91497aa%2C34363%2C1731299646472.1731299646634 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/WALs/a7bef91497aa,34363,1731299646472/a7bef91497aa%2C34363%2C1731299646472.1731299676714 2024-11-11T04:34:36,726 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:36,726 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
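The repeated "All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,...]] are bad. Aborting..." entries above come from DataStreamer pipeline recovery: once every datanode still in the write pipeline has been marked bad, there is nothing left to write to and the WAL output stream aborts, which is why the roller then requests a fresh WAL file. Whether a failed datanode is replaced while healthy pipeline members remain is governed by the HDFS client's replace-datanode-on-failure settings. The sketch below is illustrative only, not part of this test: the class name, path and payload are invented, and the namenode address is simply the mini-cluster address that appears in these log entries.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class WalPipelineConfigSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Client-side knobs for pipeline recovery: they decide whether a
      // replacement datanode is requested when one member of the pipeline
      // fails while others are still healthy. Once every member is bad,
      // as in the log above, the write can only abort.
      conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
      conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

      // Namenode address taken from the log entries above (mini cluster).
      conf.set("fs.defaultFS", "hdfs://localhost:41327");
      try (FileSystem fs = FileSystem.get(conf);
           FSDataOutputStream out = fs.create(new Path("/tmp/wal-pipeline-sketch"))) {
        out.write(new byte[] { 1, 2, 3 });
        out.hsync(); // force the write pipeline to be set up and exercised
      }
    }
  }

With best-effort enabled the client keeps writing through a shrunken pipeline rather than failing a replacement attempt, but it cannot help once the last datanode is excluded, which is the situation recorded here.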
2024-11-11T04:34:36,726 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/WALs/a7bef91497aa,34363,1731299646472/a7bef91497aa%2C34363%2C1731299646472.1731299646634 2024-11-11T04:34:36,727 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43605:43605),(127.0.0.1/127.0.0.1:35567:35567)] 2024-11-11T04:34:36,727 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/WALs/a7bef91497aa,34363,1731299646472/a7bef91497aa%2C34363%2C1731299646472.1731299646634 is not closed yet, will try archiving it next time 2024-11-11T04:34:36,727 WARN [IPC Server handler 4 on default port 41327 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/WALs/a7bef91497aa,34363,1731299646472/a7bef91497aa%2C34363%2C1731299646472.1731299646634 has not been closed. Lease recovery is in progress. RecoveryId = 1073 for block blk_1073741830_1006 2024-11-11T04:34:36,727 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/WALs/a7bef91497aa,34363,1731299646472/a7bef91497aa%2C34363%2C1731299646472.1731299646634 after 1ms 2024-11-11T04:34:37,530 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:38,305 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:39,530 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:40,306 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:40,627 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2f7f35af {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-454738336-172.17.0.2-1731299645786:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:46201,null,null]) java.net.ConnectException: Call From a7bef91497aa/172.17.0.2 to localhost:42071 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-11T04:34:40,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35251 is added to blk_1073741833_1020 (size=455) 2024-11-11T04:34:40,728 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/WALs/a7bef91497aa,34363,1731299646472/a7bef91497aa%2C34363%2C1731299646472.1731299646634 after 4002ms 2024-11-11T04:34:41,285 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299646945 to hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/oldWALs/a7bef91497aa%2C38817%2C1731299646525.1731299646945 2024-11-11T04:34:41,286 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299666273 to hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/oldWALs/a7bef91497aa%2C38817%2C1731299646525.1731299666273 2024-11-11T04:34:41,530 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:42,306 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:42,607 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@19545628[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35251, datanodeUuid=9cbb48e6-377e-490c-b963-e735d7cbd305, infoPort=35567, infoSecurePort=0, ipcPort=46615, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786):Failed to transfer BP-454738336-172.17.0.2-1731299645786:blk_1073741836_1012 to 127.0.0.1:32857 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:42,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741833_1020 (size=455) 2024-11-11T04:34:43,531 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
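Because the old master WAL could not be closed cleanly, Close-WAL-Writer-0 falls back to lease recovery: attempt=0 fails after 1 ms, and attempt=1 is still failing after roughly 4 s because block recovery on the last block is itself hitting Connection refused against the dead datanode's IPC port. The primitive underneath RecoverLeaseFSUtils is DistributedFileSystem.recoverLease(), which is asynchronous and has to be polled. The following is a minimal sketch of that polling pattern, not the HBase implementation; the class name, sleep interval and argument handling are invented for illustration.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DistributedFileSystem;

  public class LeaseRecoverySketch {
    /** Poll recoverLease() until the NameNode reports the file closed. */
    static void recover(DistributedFileSystem dfs, Path walFile) throws Exception {
      boolean recovered = dfs.recoverLease(walFile);
      while (!recovered) {
        // Lease recovery is asynchronous; the log above shows attempt=0
        // failing after 1 ms and attempt=1 still failing ~4 s later, so
        // callers retry with a backoff until the file is closed.
        Thread.sleep(4000L);
        recovered = dfs.recoverLease(walFile);
      }
    }

    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      conf.set("fs.defaultFS", "hdfs://localhost:41327");
      try (FileSystem fs = FileSystem.get(conf)) {
        // Pass the WAL path named in the log entries above as args[0].
        recover((DistributedFileSystem) fs, new Path(args[0]));
      }
    }
  }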
2024-11-11T04:34:43,607 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@19545628[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35251, datanodeUuid=9cbb48e6-377e-490c-b963-e735d7cbd305, infoPort=35567, infoSecurePort=0, ipcPort=46615, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786):Failed to transfer BP-454738336-172.17.0.2-1731299645786:blk_1073741828_1004 to 127.0.0.1:32857 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:43,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:34:43,925 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C38817%2C1731299646525.1731299683925 2024-11-11T04:34:43,931 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:43,931 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:43,931 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:43,931 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:43,931 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:43,932 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299668290 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299683925 2024-11-11T04:34:43,932 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35567:35567),(127.0.0.1/127.0.0.1:43605:43605)] 2024-11-11T04:34:43,932 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299668290 is not closed yet, will try archiving it next time 2024-11-11T04:34:43,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741876_1059 (size=12911) 2024-11-11T04:34:43,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38817 {}] regionserver.HRegion(8855): Flush requested on 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:43,937 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6ddada07afcf2fed42f4de9080a351bf 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-11T04:34:43,943 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/579633d2829e49ca993a1448af16e0cb is 1080, key is row0013/info:/1731299683934/Put/seqid=0 2024-11-11T04:34:43,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35251 is added to blk_1073741891_1075 (size=8190) 2024-11-11T04:34:43,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741891_1075 (size=8190) 2024-11-11T04:34:43,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/579633d2829e49ca993a1448af16e0cb 2024-11-11T04:34:43,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/579633d2829e49ca993a1448af16e0cb as hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/579633d2829e49ca993a1448af16e0cb 2024-11-11T04:34:43,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/579633d2829e49ca993a1448af16e0cb, entries=3, sequenceid=66, filesize=8.0 K 2024-11-11T04:34:43,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for 6ddada07afcf2fed42f4de9080a351bf in 34ms, sequenceid=66, compaction requested=false 2024-11-11T04:34:43,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6ddada07afcf2fed42f4de9080a351bf: 2024-11-11T04:34:43,971 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-11T04:34:43,971 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:34:43,971 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/5a577459378146e6b187f01e01a550d8 because midkey is the same as first or last row 2024-11-11T04:34:44,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38817 {}] regionserver.HRegion(8855): Flush requested on 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:44,157 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6ddada07afcf2fed42f4de9080a351bf 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-11T04:34:44,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/52003d6e50924aedad07ea715cf2e7e2 is 1080, key is row0015/info:/1731299683938/Put/seqid=0 2024-11-11T04:34:44,163 WARN [Thread-1043 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:44,163 WARN [Thread-1043 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]) is bad. 2024-11-11T04:34:44,163 WARN [Thread-1043 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741892_1076 2024-11-11T04:34:44,164 WARN [Thread-1043 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK] 2024-11-11T04:34:44,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35251 is added to blk_1073741893_1077 (size=14660) 2024-11-11T04:34:44,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741893_1077 (size=14660) 2024-11-11T04:34:44,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/52003d6e50924aedad07ea715cf2e7e2 2024-11-11T04:34:44,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/52003d6e50924aedad07ea715cf2e7e2 as hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/52003d6e50924aedad07ea715cf2e7e2 2024-11-11T04:34:44,181 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/52003d6e50924aedad07ea715cf2e7e2, entries=9, sequenceid=79, filesize=14.3 K 2024-11-11T04:34:44,183 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10758, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 6ddada07afcf2fed42f4de9080a351bf in 26ms, sequenceid=79, compaction requested=true 2024-11-11T04:34:44,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6ddada07afcf2fed42f4de9080a351bf: 2024-11-11T04:34:44,183 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.0 K, sizeToCheck=16.0 K 2024-11-11T04:34:44,183 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:34:44,183 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/5a577459378146e6b187f01e01a550d8 because midkey is the same as first or last row 2024-11-11T04:34:44,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ddada07afcf2fed42f4de9080a351bf:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T04:34:44,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:34:44,183 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T04:34:44,184 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T04:34:44,184 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HStore(1541): 6ddada07afcf2fed42f4de9080a351bf/info is initiating minor compaction (all files) 2024-11-11T04:34:44,184 INFO [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6ddada07afcf2fed42f4de9080a351bf/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. 
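The two MemStoreFlusher entries above (7.35 KB at sequenceid=66, then 10.51 KB at sequenceid=79) show the standard flush path: the memstore is written to a temporary HFile under .tmp, committed into the store, and the split policy is re-evaluated; the second flush then queues the compaction that is starting here. A flush can also be requested explicitly through the client Admin API. The sketch below is illustrative only, a standalone class assuming the default client configuration, with the table name taken from this test.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class FlushSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        // Ask every region of the test table to flush its memstore to a new
        // HFile, the same operation MemStoreFlusher performs above.
        admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
      }
    }
  }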
2024-11-11T04:34:44,184 INFO [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/5a577459378146e6b187f01e01a550d8, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/579633d2829e49ca993a1448af16e0cb, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/52003d6e50924aedad07ea715cf2e7e2] into tmpdir=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp, totalSize=40.0 K 2024-11-11T04:34:44,185 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5a577459378146e6b187f01e01a550d8, keycount=12, bloomtype=ROW, size=17.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731299660268 2024-11-11T04:34:44,185 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] compactions.Compactor(225): Compacting 579633d2829e49ca993a1448af16e0cb, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1731299670176 2024-11-11T04:34:44,186 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] compactions.Compactor(225): Compacting 52003d6e50924aedad07ea715cf2e7e2, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731299683938 2024-11-11T04:34:44,197 INFO [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ddada07afcf2fed42f4de9080a351bf#info#compaction#27 average throughput is 22.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T04:34:44,198 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/537a8a7bc65341d0b640a2668005f3d3 is 1080, key is row0002/info:/1731299660268/Put/seqid=0 2024-11-11T04:34:44,200 WARN [Thread-1051 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T04:34:44,200 WARN [Thread-1051 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]) is bad. 2024-11-11T04:34:44,200 WARN [Thread-1051 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741894_1078 2024-11-11T04:34:44,201 WARN [Thread-1051 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK] 2024-11-11T04:34:44,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35251 is added to blk_1073741895_1079 (size=28989) 2024-11-11T04:34:44,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741895_1079 (size=28989) 2024-11-11T04:34:44,211 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/.tmp/info/537a8a7bc65341d0b640a2668005f3d3 as hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/537a8a7bc65341d0b640a2668005f3d3 2024-11-11T04:34:44,218 INFO [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6ddada07afcf2fed42f4de9080a351bf/info of 6ddada07afcf2fed42f4de9080a351bf into 537a8a7bc65341d0b640a2668005f3d3(size=28.3 K), total size for store is 28.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
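The store now holds three HFiles of roughly 40 K, so ExploringCompactionPolicy selects all three and the short-compactions thread rewrites them into a single 28.3 K file, as logged above. A compaction can likewise be requested from a client; this sketch is illustrative only, again a standalone class assuming the default configuration.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class CompactionSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        // Request a compaction; the region server's CompactSplit thread picks
        // the eligible HFiles and merges them, after which the split policy
        // is re-evaluated, just as in the entries above.
        admin.compact(table);
      }
    }
  }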
2024-11-11T04:34:44,218 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6ddada07afcf2fed42f4de9080a351bf: 2024-11-11T04:34:44,218 INFO [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf., storeName=6ddada07afcf2fed42f4de9080a351bf/info, priority=13, startTime=1731299684183; duration=0sec 2024-11-11T04:34:44,218 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-11T04:34:44,218 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:34:44,218 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/537a8a7bc65341d0b640a2668005f3d3 because midkey is the same as first or last row 2024-11-11T04:34:44,218 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-11T04:34:44,218 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:34:44,218 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/537a8a7bc65341d0b640a2668005f3d3 because midkey is the same as first or last row 2024-11-11T04:34:44,218 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-11T04:34:44,218 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:34:44,218 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/537a8a7bc65341d0b640a2668005f3d3 because midkey is the same as first or last row 2024-11-11T04:34:44,218 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:34:44,218 DEBUG [RS:0;a7bef91497aa:38817-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ddada07afcf2fed42f4de9080a351bf:info 2024-11-11T04:34:44,307 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:44,307 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-11T04:34:44,334 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.1731299668290 to hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/oldWALs/a7bef91497aa%2C38817%2C1731299646525.1731299668290 2024-11-11T04:34:44,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T04:34:44,358 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T04:34:44,358 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at 
org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:34:44,358 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:44,358 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:44,358 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T04:34:44,358 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T04:34:44,358 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1223048069, stopped=false 2024-11-11T04:34:44,358 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a7bef91497aa,34363,1731299646472 2024-11-11T04:34:44,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:34:44,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:34:44,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40761-0x101959b91600002, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:34:44,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:44,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:44,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40761-0x101959b91600002, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:44,360 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T04:34:44,361 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
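At 04:34:44,357 the test body ends and tearDown begins: shutdownMiniCluster() closes the shared async connection, the master deletes /hbase/running in ZooKeeper, and each region server sees the deletion and starts stopping (continued below). The JUnit scaffolding that produces this sequence looks roughly like the sketch below; it is a simplified stand-in, not the actual AbstractTestLogRolling class, and only the HBaseTestingUtil calls are taken from the stack traces above.

  import org.apache.hadoop.hbase.HBaseTestingUtil;
  import org.junit.After;
  import org.junit.Before;

  public class MiniClusterLifecycleSketch {
    private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

    @Before
    public void setUp() throws Exception {
      // Brings up an in-process HDFS + ZooKeeper + HBase cluster like the
      // one whose datanodes are being killed in this test run.
      testUtil.startMiniCluster();
    }

    @After
    public void tearDown() throws Exception {
      // Produces the "Shutting down minicluster" / "Cluster shutdown
      // requested" sequence seen above: the async connection is closed,
      // /hbase/running is deleted, and every region server is told to STOP.
      testUtil.shutdownMiniCluster();
    }
  }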
2024-11-11T04:34:44,361 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:34:44,361 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:44,361 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'a7bef91497aa,38817,1731299646525' ***** 2024-11-11T04:34:44,361 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T04:34:44,361 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:34:44,361 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a7bef91497aa,40761,1731299647475' ***** 2024-11-11T04:34:44,361 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T04:34:44,361 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:34:44,361 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T04:34:44,362 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40761-0x101959b91600002, quorum=127.0.0.1:51151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:34:44,362 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T04:34:44,362 INFO [RS:0;a7bef91497aa:38817 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T04:34:44,362 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T04:34:44,362 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T04:34:44,362 INFO [RS:0;a7bef91497aa:38817 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T04:34:44,362 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.HRegionServer(3091): Received CLOSE for 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:44,362 INFO [RS:1;a7bef91497aa:40761 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T04:34:44,362 INFO [RS:1;a7bef91497aa:40761 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T04:34:44,362 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.HRegionServer(959): stopping server a7bef91497aa,40761,1731299647475 2024-11-11T04:34:44,362 INFO [RS:1;a7bef91497aa:40761 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T04:34:44,362 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.HRegionServer(959): stopping server a7bef91497aa,38817,1731299646525 2024-11-11T04:34:44,362 INFO [RS:0;a7bef91497aa:38817 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T04:34:44,362 INFO [RS:1;a7bef91497aa:40761 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;a7bef91497aa:40761. 2024-11-11T04:34:44,362 INFO [RS:0;a7bef91497aa:38817 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a7bef91497aa:38817. 
2024-11-11T04:34:44,362 DEBUG [RS:1;a7bef91497aa:40761 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:34:44,362 DEBUG [RS:1;a7bef91497aa:40761 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:44,362 DEBUG [RS:0;a7bef91497aa:38817 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:34:44,363 DEBUG [RS:0;a7bef91497aa:38817 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:44,363 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.HRegionServer(976): stopping server a7bef91497aa,40761,1731299647475; all regions closed. 2024-11-11T04:34:44,363 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T04:34:44,363 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-11T04:34:44,363 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T04:34:44,363 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T04:34:44,363 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T04:34:44,363 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:44,363 DEBUG [RS:0;a7bef91497aa:38817 {}] regionserver.HRegionServer(1325): Online Regions={6ddada07afcf2fed42f4de9080a351bf=TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf., 1588230740=hbase:meta,,1.1588230740} 2024-11-11T04:34:44,363 DEBUG [RS:0;a7bef91497aa:38817 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6ddada07afcf2fed42f4de9080a351bf 2024-11-11T04:34:44,363 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:44,363 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6ddada07afcf2fed42f4de9080a351bf, disabling compactions & flushes 2024-11-11T04:34:44,363 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:44,363 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. 2024-11-11T04:34:44,363 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:44,363 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. 2024-11-11T04:34:44,364 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. after waiting 0 ms 2024-11-11T04:34:44,364 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:44,364 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. 
2024-11-11T04:34:44,364 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:34:44,364 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T04:34:44,364 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T04:34:44,364 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:34:44,364 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:34:44,364 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-11T04:34:44,364 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/2962c2a55e4b458e93d24dbc48216093, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9536cc38e21e4b5fb7e4fe6ec9e57a23, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9025954da55f4d128b42822fc60ea5b7, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/4f9b612e2b734cddaf316fa0ebb7c9e4, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/5184cac00a10495c9b0aa7d51dadd8a3, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/5a577459378146e6b187f01e01a550d8, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/1b989ae92692401cb4c62ca105e86bc7, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/579633d2829e49ca993a1448af16e0cb, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/52003d6e50924aedad07ea715cf2e7e2] to archive 2024-11-11T04:34:44,364 ERROR [FSHLog-0-hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094-prefix:a7bef91497aa,38817,1731299646525.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:44,365 WARN [FSHLog-0-hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094-prefix:a7bef91497aa,38817,1731299646525.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:44,365 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7bef91497aa%2C38817%2C1731299646525.meta:.meta(num 1731299647327) roll requested 2024-11-11T04:34:44,365 INFO [regionserver/a7bef91497aa:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C38817%2C1731299646525.meta.1731299684365.meta 2024-11-11T04:34:44,365 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T04:34:44,367 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/2962c2a55e4b458e93d24dbc48216093 to hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/2962c2a55e4b458e93d24dbc48216093 2024-11-11T04:34:44,368 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:44,368 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:44,368 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 2024-11-11T04:34:44,369 WARN [IPC Server handler 3 on default port 41327 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 has not been closed. Lease recovery is in progress. RecoveryId = 1080 for block blk_1073741837_1013 2024-11-11T04:34:44,369 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9536cc38e21e4b5fb7e4fe6ec9e57a23 to hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9536cc38e21e4b5fb7e4fe6ec9e57a23 2024-11-11T04:34:44,369 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 after 1ms 2024-11-11T04:34:44,370 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9025954da55f4d128b42822fc60ea5b7 to hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/9025954da55f4d128b42822fc60ea5b7 2024-11-11T04:34:44,371 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/4f9b612e2b734cddaf316fa0ebb7c9e4 to hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/4f9b612e2b734cddaf316fa0ebb7c9e4 2024-11-11T04:34:44,373 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/5184cac00a10495c9b0aa7d51dadd8a3 to hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/5184cac00a10495c9b0aa7d51dadd8a3 2024-11-11T04:34:44,374 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/5a577459378146e6b187f01e01a550d8 to hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/5a577459378146e6b187f01e01a550d8 2024-11-11T04:34:44,375 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/1b989ae92692401cb4c62ca105e86bc7 to hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/1b989ae92692401cb4c62ca105e86bc7 2024-11-11T04:34:44,376 WARN [Thread-1057 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T04:34:44,376 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/579633d2829e49ca993a1448af16e0cb to hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/579633d2829e49ca993a1448af16e0cb 2024-11-11T04:34:44,376 WARN [Thread-1057 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741896_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK], DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]) is bad. 2024-11-11T04:34:44,376 WARN [Thread-1057 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741896_1081 2024-11-11T04:34:44,377 WARN [Thread-1057 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK] 2024-11-11T04:34:44,378 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/52003d6e50924aedad07ea715cf2e7e2 to hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/info/52003d6e50924aedad07ea715cf2e7e2 2024-11-11T04:34:44,379 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=a7bef91497aa:34363 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-11T04:34:44,379 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [2962c2a55e4b458e93d24dbc48216093=10347, 9536cc38e21e4b5fb7e4fe6ec9e57a23=12506, 9025954da55f4d128b42822fc60ea5b7=17994, 4f9b612e2b734cddaf316fa0ebb7c9e4=6027, 5184cac00a10495c9b0aa7d51dadd8a3=6027, 5a577459378146e6b187f01e01a550d8=18097, 1b989ae92692401cb4c62ca105e86bc7=6027, 579633d2829e49ca993a1448af16e0cb=8190, 52003d6e50924aedad07ea715cf2e7e2=14660] 2024-11-11T04:34:44,384 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:44,385 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:44,385 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:44,385 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:44,385 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:44,385 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299684365.meta 2024-11-11T04:34:44,386 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:44,386 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46201,DS-a4611af3-51d9-4d80-83b3-808bf9f260d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T04:34:44,386 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta 2024-11-11T04:34:44,386 WARN [IPC Server handler 4 on default port 41327 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta has not been closed. Lease recovery is in progress. RecoveryId = 1083 for block blk_1073741834_1010 2024-11-11T04:34:44,386 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta after 0ms 2024-11-11T04:34:44,388 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6ddada07afcf2fed42f4de9080a351bf/recovered.edits/83.seqid, newMaxSeqId=83, maxSeqId=1 2024-11-11T04:34:44,389 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35567:35567),(127.0.0.1/127.0.0.1:43605:43605)] 2024-11-11T04:34:44,389 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta is not closed yet, will try archiving it next time 2024-11-11T04:34:44,389 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. 2024-11-11T04:34:44,389 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6ddada07afcf2fed42f4de9080a351bf: Waiting for close lock at 1731299684363Running coprocessor pre-close hooks at 1731299684363Disabling compacts and flushes for region at 1731299684363Disabling writes for close at 1731299684364 (+1 ms)Writing region close event to WAL at 1731299684383 (+19 ms)Running coprocessor post-close hooks at 1731299684389 (+6 ms)Closed at 1731299684389 2024-11-11T04:34:44,389 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf. 
2024-11-11T04:34:44,406 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740/.tmp/info/836dda4412b9471085b1521b449a8606 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731299647559.6ddada07afcf2fed42f4de9080a351bf./info:regioninfo/1731299647921/Put/seqid=0 2024-11-11T04:34:44,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35251 is added to blk_1073741898_1084 (size=7089) 2024-11-11T04:34:44,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741898_1084 (size=7089) 2024-11-11T04:34:44,413 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740/.tmp/info/836dda4412b9471085b1521b449a8606 2024-11-11T04:34:44,434 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740/.tmp/ns/e502116365e3420182df6d981d1c5261 is 43, key is default/ns:d/1731299647369/Put/seqid=0 2024-11-11T04:34:44,437 WARN [Thread-1070 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32857 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:34:44,437 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:56098 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741899_1085] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8]'}, localName='127.0.0.1:45993', datanodeUuid='c22f2d70-f590-45e9-9ca1-b877c52e1b27', xmitsInProgress=0}:Exception transferring block BP-454738336-172.17.0.2-1731299645786:blk_1073741899_1085 to mirror 127.0.0.1:32857 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:44,437 WARN [Thread-1070 {}] hdfs.DataStreamer(1731): Error Recovery for BP-454738336-172.17.0.2-1731299645786:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45993,DS-96e399a0-103e-4b17-ba00-79e62af7c104,DISK], DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK]) is bad. 2024-11-11T04:34:44,437 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:56098 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741899_1085] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T04:34:44,437 WARN [Thread-1070 {}] hdfs.DataStreamer(1850): Abandoning BP-454738336-172.17.0.2-1731299645786:blk_1073741899_1085 2024-11-11T04:34:44,437 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667751035_22 at /127.0.0.1:56098 [Receiving block BP-454738336-172.17.0.2-1731299645786:blk_1073741899_1085] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56098 dst: /127.0.0.1:45993 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T04:34:44,438 WARN [Thread-1070 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32857,DS-0c790adc-35bd-4d1e-9dfa-17a5b4907a46,DISK] 2024-11-11T04:34:44,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35251 is added to blk_1073741900_1086 (size=5153) 2024-11-11T04:34:44,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741900_1086 (size=5153) 2024-11-11T04:34:44,443 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740/.tmp/ns/e502116365e3420182df6d981d1c5261 2024-11-11T04:34:44,465 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740/.tmp/table/fcc57cceeee84ada870dcdec2115c593 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731299647932/Put/seqid=0 2024-11-11T04:34:44,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35251 is added to blk_1073741901_1087 (size=5424) 2024-11-11T04:34:44,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741901_1087 (size=5424) 2024-11-11T04:34:44,470 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740/.tmp/table/fcc57cceeee84ada870dcdec2115c593 2024-11-11T04:34:44,476 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740/.tmp/info/836dda4412b9471085b1521b449a8606 as hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740/info/836dda4412b9471085b1521b449a8606 2024-11-11T04:34:44,481 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740/info/836dda4412b9471085b1521b449a8606, entries=10, sequenceid=11, filesize=6.9 K 2024-11-11T04:34:44,482 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740/.tmp/ns/e502116365e3420182df6d981d1c5261 as hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740/ns/e502116365e3420182df6d981d1c5261 2024-11-11T04:34:44,488 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740/ns/e502116365e3420182df6d981d1c5261, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T04:34:44,489 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740/.tmp/table/fcc57cceeee84ada870dcdec2115c593 as hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740/table/fcc57cceeee84ada870dcdec2115c593 2024-11-11T04:34:44,495 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740/table/fcc57cceeee84ada870dcdec2115c593, entries=2, sequenceid=11, filesize=5.3 K 2024-11-11T04:34:44,496 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, compaction requested=false 2024-11-11T04:34:44,502 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T04:34:44,502 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T04:34:44,502 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T04:34:44,503 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731299684364Running coprocessor pre-close hooks at 1731299684364Disabling compacts and flushes for region at 1731299684364Disabling writes for close at 1731299684364Obtaining lock to block concurrent updates at 1731299684364Preparing flush snapshotting stores in 1588230740 at 1731299684364Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731299684364Flushing stores of hbase:meta,,1.1588230740 at 1731299684390 (+26 ms)Flushing 1588230740/info: creating writer at 1731299684390Flushing 1588230740/info: appending metadata at 1731299684406 (+16 ms)Flushing 1588230740/info: closing flushed file at 1731299684406Flushing 1588230740/ns: creating writer at 1731299684419 (+13 ms)Flushing 1588230740/ns: appending metadata at 1731299684434 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731299684434Flushing 1588230740/table: creating writer at 1731299684449 (+15 ms)Flushing 1588230740/table: appending metadata at 1731299684464 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731299684464Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75508987: reopening flushed file at 1731299684475 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42d212cd: reopening flushed file at 1731299684482 (+7 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@385d70a7: reopening flushed file at 1731299684488 (+6 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, compaction requested=false at 1731299684497 (+9 ms)Writing region close event to WAL at 1731299684498 (+1 ms)Running coprocessor post-close hooks at 1731299684502 (+4 ms)Closed at 1731299684502 2024-11-11T04:34:44,503 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T04:34:44,563 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.HRegionServer(976): stopping server a7bef91497aa,38817,1731299646525; all regions closed. 2024-11-11T04:34:44,564 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:44,564 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:44,564 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:44,564 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:44,564 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:44,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741897_1082 (size=825) 2024-11-11T04:34:44,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35251 is added to blk_1073741897_1082 (size=825) 2024-11-11T04:34:44,611 INFO [regionserver/a7bef91497aa:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T04:34:44,611 INFO [regionserver/a7bef91497aa:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T04:34:44,813 INFO [regionserver/a7bef91497aa:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:34:44,826 INFO [regionserver/a7bef91497aa:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T04:34:44,826 INFO [regionserver/a7bef91497aa:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T04:34:45,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35251 is added to blk_1073741876_1059 (size=12911) 2024-11-11T04:34:45,528 INFO [regionserver/a7bef91497aa:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:34:45,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741831_1007 (size=1321) 2024-11-11T04:34:45,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:34:46,608 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1e1a1af9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35251, datanodeUuid=9cbb48e6-377e-490c-b963-e735d7cbd305, infoPort=35567, infoSecurePort=0, ipcPort=46615, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786):Failed to transfer BP-454738336-172.17.0.2-1731299645786:blk_1073741827_1003 to 127.0.0.1:32857 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:46,608 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@19545628[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35251, datanodeUuid=9cbb48e6-377e-490c-b963-e735d7cbd305, infoPort=35567, infoSecurePort=0, ipcPort=46615, storageInfo=lv=-57;cid=testClusterID;nsid=1450183398;c=1731299645786):Failed to transfer BP-454738336-172.17.0.2-1731299645786:blk_1073741825_1001 to 127.0.0.1:32857 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:47,456 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-11T04:34:47,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T04:34:47,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T04:34:47,956 INFO [master/a7bef91497aa:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-11T04:34:47,956 INFO [master/a7bef91497aa:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-11T04:34:48,370 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 after 4002ms 2024-11-11T04:34:48,387 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta after 4001ms 2024-11-11T04:34:48,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741826_1002 (size=42) 2024-11-11T04:34:48,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741835_1011 (size=393) 2024-11-11T04:34:49,368 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-11T04:34:49,370 DEBUG [RS:1;a7bef91497aa:40761 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/oldWALs 2024-11-11T04:34:49,370 INFO [RS:1;a7bef91497aa:40761 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7bef91497aa%2C40761%2C1731299647475:(num 1731299647660) 2024-11-11T04:34:49,370 DEBUG [RS:1;a7bef91497aa:40761 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:49,371 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:34:49,371 INFO [RS:1;a7bef91497aa:40761 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T04:34:49,371 INFO [RS:1;a7bef91497aa:40761 {}] hbase.ChoreService(370): Chore service for: regionserver/a7bef91497aa:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T04:34:49,371 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T04:34:49,371 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T04:34:49,371 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T04:34:49,371 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-11T04:34:49,371 INFO [RS:1;a7bef91497aa:40761 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T04:34:49,371 INFO [RS:1;a7bef91497aa:40761 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40761 2024-11-11T04:34:49,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40761-0x101959b91600002, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a7bef91497aa,40761,1731299647475 2024-11-11T04:34:49,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:34:49,373 INFO [RS:1;a7bef91497aa:40761 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T04:34:49,375 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a7bef91497aa,40761,1731299647475] 2024-11-11T04:34:49,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:49,377 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a7bef91497aa,40761,1731299647475 already deleted, retry=false 2024-11-11T04:34:49,377 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a7bef91497aa,40761,1731299647475 expired; onlineServers=1 2024-11-11T04:34:49,390 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:49,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:49,412 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:49,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:49,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:49,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:49,423 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:49,424 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:49,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40761-0x101959b91600002, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:34:49,475 INFO [RS:1;a7bef91497aa:40761 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T04:34:49,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40761-0x101959b91600002, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:34:49,475 INFO [RS:1;a7bef91497aa:40761 {}] regionserver.HRegionServer(1031): Exiting; stopping=a7bef91497aa,40761,1731299647475; zookeeper connection closed. 2024-11-11T04:34:49,476 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@509d9446 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@509d9446 2024-11-11T04:34:49,565 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-11T04:34:49,568 DEBUG [RS:0;a7bef91497aa:38817 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/oldWALs 2024-11-11T04:34:49,568 INFO [RS:0;a7bef91497aa:38817 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7bef91497aa%2C38817%2C1731299646525.meta:.meta(num 1731299684365) 2024-11-11T04:34:49,569 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:49,569 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:49,569 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:49,569 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:49,569 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:49,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741890_1074 (size=16308) 2024-11-11T04:34:49,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35251 is added to blk_1073741890_1074 (size=16308) 2024-11-11T04:34:49,573 DEBUG [RS:0;a7bef91497aa:38817 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/oldWALs 2024-11-11T04:34:49,573 INFO [RS:0;a7bef91497aa:38817 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7bef91497aa%2C38817%2C1731299646525:(num 1731299683925) 2024-11-11T04:34:49,573 DEBUG [RS:0;a7bef91497aa:38817 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:49,573 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:34:49,573 INFO [RS:0;a7bef91497aa:38817 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T04:34:49,574 INFO [RS:0;a7bef91497aa:38817 {}] hbase.ChoreService(370): Chore service for: regionserver/a7bef91497aa:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T04:34:49,574 INFO [RS:0;a7bef91497aa:38817 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T04:34:49,574 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T04:34:49,574 INFO [RS:0;a7bef91497aa:38817 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38817 2024-11-11T04:34:49,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a7bef91497aa,38817,1731299646525 2024-11-11T04:34:49,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:34:49,577 INFO [RS:0;a7bef91497aa:38817 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T04:34:49,578 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a7bef91497aa,38817,1731299646525] 2024-11-11T04:34:49,580 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a7bef91497aa,38817,1731299646525 already deleted, retry=false 2024-11-11T04:34:49,580 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a7bef91497aa,38817,1731299646525 expired; onlineServers=0 2024-11-11T04:34:49,580 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a7bef91497aa,34363,1731299646472' ***** 2024-11-11T04:34:49,580 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T04:34:49,580 INFO [M:0;a7bef91497aa:34363 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T04:34:49,580 INFO [M:0;a7bef91497aa:34363 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T04:34:49,580 DEBUG [M:0;a7bef91497aa:34363 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T04:34:49,580 DEBUG [M:0;a7bef91497aa:34363 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T04:34:49,580 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
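Around the shutdown above, the ZKWatcher records show the region server's ephemeral znode under /hbase/rs being deleted and the master's RegionServerTracker treating that deletion as server expiration. As a minimal stand-alone illustration of observing such a NodeDeleted event with a plain ZooKeeper client (not how HBase wires it up internally), the sketch below reuses the quorum address and znode path from the log; the session timeout and the handler body are assumptions.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsEphemeralWatch {
  public static void main(String[] args) throws Exception {
    // Quorum address copied from the log records; the session timeout is an assumed value.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51151", 30_000, event -> { });
    String rsNode = "/hbase/rs/a7bef91497aa,38817,1731299646525";

    Watcher onDelete = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        // This is the moment at which the master's RegionServerTracker would begin
        // expiration processing for the departed region server.
        System.out.println("Ephemeral node deleted: " + event.getPath());
      }
    };

    // exists() with a watcher fires once when the znode changes or disappears.
    zk.exists(rsNode, onDelete);
    Thread.sleep(60_000); // keep the process alive long enough to observe the event
    zk.close();
  }
}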
2024-11-11T04:34:49,580 DEBUG [master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299646714 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299646714,5,FailOnTimeoutGroup] 2024-11-11T04:34:49,580 DEBUG [master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299646715 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299646715,5,FailOnTimeoutGroup] 2024-11-11T04:34:49,580 INFO [M:0;a7bef91497aa:34363 {}] hbase.ChoreService(370): Chore service for: master/a7bef91497aa:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T04:34:49,580 INFO [M:0;a7bef91497aa:34363 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T04:34:49,580 DEBUG [M:0;a7bef91497aa:34363 {}] master.HMaster(1795): Stopping service threads 2024-11-11T04:34:49,580 INFO [M:0;a7bef91497aa:34363 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T04:34:49,580 INFO [M:0;a7bef91497aa:34363 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T04:34:49,581 INFO [M:0;a7bef91497aa:34363 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T04:34:49,581 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T04:34:49,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T04:34:49,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:49,582 DEBUG [M:0;a7bef91497aa:34363 {}] zookeeper.ZKUtil(347): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T04:34:49,582 WARN [M:0;a7bef91497aa:34363 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T04:34:49,582 INFO [M:0;a7bef91497aa:34363 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/.lastflushedseqids 2024-11-11T04:34:49,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35251 is added to blk_1073741902_1088 (size=130) 2024-11-11T04:34:49,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741902_1088 (size=130) 2024-11-11T04:34:49,588 INFO [M:0;a7bef91497aa:34363 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T04:34:49,588 INFO [M:0;a7bef91497aa:34363 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T04:34:49,588 DEBUG [M:0;a7bef91497aa:34363 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T04:34:49,588 INFO [M:0;a7bef91497aa:34363 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:49,588 DEBUG [M:0;a7bef91497aa:34363 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:49,588 DEBUG [M:0;a7bef91497aa:34363 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T04:34:49,588 DEBUG [M:0;a7bef91497aa:34363 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:49,588 INFO [M:0;a7bef91497aa:34363 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-11T04:34:49,604 DEBUG [M:0;a7bef91497aa:34363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e4dec01d7b474949bb60a4c0cb6d15a2 is 82, key is hbase:meta,,1/info:regioninfo/1731299647353/Put/seqid=0 2024-11-11T04:34:49,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35251 is added to blk_1073741903_1089 (size=5672) 2024-11-11T04:34:49,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741903_1089 (size=5672) 2024-11-11T04:34:49,610 INFO [M:0;a7bef91497aa:34363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e4dec01d7b474949bb60a4c0cb6d15a2 2024-11-11T04:34:49,630 DEBUG [M:0;a7bef91497aa:34363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d8b87f6a7c4146338dbd7f05d4d03808 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731299647936/Put/seqid=0 2024-11-11T04:34:49,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741904_1090 (size=6255) 2024-11-11T04:34:49,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35251 is added to blk_1073741904_1090 (size=6255) 2024-11-11T04:34:49,635 INFO [M:0;a7bef91497aa:34363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d8b87f6a7c4146338dbd7f05d4d03808 2024-11-11T04:34:49,640 INFO [M:0;a7bef91497aa:34363 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d8b87f6a7c4146338dbd7f05d4d03808 2024-11-11T04:34:49,655 DEBUG [M:0;a7bef91497aa:34363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b2162cbb8b0e46939f851cdbe321b1a9 is 69, key is a7bef91497aa,38817,1731299646525/rs:state/1731299646769/Put/seqid=0 2024-11-11T04:34:49,660 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741905_1091 (size=5224) 2024-11-11T04:34:49,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35251 is added to blk_1073741905_1091 (size=5224) 2024-11-11T04:34:49,660 INFO [M:0;a7bef91497aa:34363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b2162cbb8b0e46939f851cdbe321b1a9 2024-11-11T04:34:49,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:34:49,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38817-0x101959b91600001, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:34:49,679 INFO [RS:0;a7bef91497aa:38817 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T04:34:49,679 INFO [RS:0;a7bef91497aa:38817 {}] regionserver.HRegionServer(1031): Exiting; stopping=a7bef91497aa,38817,1731299646525; zookeeper connection closed. 2024-11-11T04:34:49,679 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7f91bf04 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7f91bf04 2024-11-11T04:34:49,679 DEBUG [M:0;a7bef91497aa:34363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/127720590a37431a9e74f31fa449cedb is 52, key is load_balancer_on/state:d/1731299647459/Put/seqid=0 2024-11-11T04:34:49,679 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-11T04:34:49,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35251 is added to blk_1073741906_1092 (size=5056) 2024-11-11T04:34:49,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741906_1092 (size=5056) 2024-11-11T04:34:49,684 INFO [M:0;a7bef91497aa:34363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/127720590a37431a9e74f31fa449cedb 2024-11-11T04:34:49,689 DEBUG [M:0;a7bef91497aa:34363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e4dec01d7b474949bb60a4c0cb6d15a2 as hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e4dec01d7b474949bb60a4c0cb6d15a2 2024-11-11T04:34:49,694 INFO [M:0;a7bef91497aa:34363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e4dec01d7b474949bb60a4c0cb6d15a2, entries=8, sequenceid=60, filesize=5.5 K 2024-11-11T04:34:49,694 DEBUG [M:0;a7bef91497aa:34363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d8b87f6a7c4146338dbd7f05d4d03808 as hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d8b87f6a7c4146338dbd7f05d4d03808 2024-11-11T04:34:49,699 INFO [M:0;a7bef91497aa:34363 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d8b87f6a7c4146338dbd7f05d4d03808 2024-11-11T04:34:49,699 INFO [M:0;a7bef91497aa:34363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d8b87f6a7c4146338dbd7f05d4d03808, entries=6, sequenceid=60, filesize=6.1 K 2024-11-11T04:34:49,700 DEBUG [M:0;a7bef91497aa:34363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b2162cbb8b0e46939f851cdbe321b1a9 as hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b2162cbb8b0e46939f851cdbe321b1a9 2024-11-11T04:34:49,704 INFO [M:0;a7bef91497aa:34363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b2162cbb8b0e46939f851cdbe321b1a9, entries=2, sequenceid=60, filesize=5.1 K 2024-11-11T04:34:49,705 DEBUG [M:0;a7bef91497aa:34363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/127720590a37431a9e74f31fa449cedb as hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/127720590a37431a9e74f31fa449cedb 2024-11-11T04:34:49,709 INFO [M:0;a7bef91497aa:34363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/127720590a37431a9e74f31fa449cedb, entries=1, sequenceid=60, filesize=4.9 K 2024-11-11T04:34:49,711 INFO [M:0;a7bef91497aa:34363 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=60, compaction requested=false 2024-11-11T04:34:49,712 INFO [M:0;a7bef91497aa:34363 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
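The flush above writes each column family's snapshot to a file under .tmp/ and only then commits it by moving it into the family directory (the HRegionFileSystem "Committing ... as ..." lines). Purely as an illustration of that write-to-temp-then-rename pattern, and not HBase's actual implementation, here is a small Hadoop FileSystem sketch; the class name and paths are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommit {

  // Write-to-temp-then-rename: the file only becomes visible under the final directory
  // once it is complete, which is the shape of the flush commits logged above.
  public static Path commit(FileSystem fs, Path tmpFile, Path familyDir) throws Exception {
    Path dest = new Path(familyDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dest)) {
      throw new java.io.IOException("Failed to commit " + tmpFile + " to " + dest);
    }
    return dest;
  }

  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Placeholder paths for illustration only.
    Path tmp = new Path("/example/store/.tmp/info/e4dec01d7b474949bb60a4c0cb6d15a2");
    Path familyDir = new Path("/example/store/info");
    commit(fs, tmp, familyDir);
  }
}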
2024-11-11T04:34:49,712 DEBUG [M:0;a7bef91497aa:34363 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731299689588Disabling compacts and flushes for region at 1731299689588Disabling writes for close at 1731299689588Obtaining lock to block concurrent updates at 1731299689588Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731299689588Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731299689589 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731299689589Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731299689589Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731299689604 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731299689604Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731299689615 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731299689630 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731299689630Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731299689640 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731299689654 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731299689654Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731299689664 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731299689679 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731299689679Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@90202bd: reopening flushed file at 1731299689688 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7231f2bd: reopening flushed file at 1731299689694 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55a54e99: reopening flushed file at 1731299689699 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4684f6b2: reopening flushed file at 1731299689704 (+5 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=60, compaction requested=false at 1731299689711 (+7 ms)Writing region close event to WAL at 1731299689712 (+1 ms)Closed at 1731299689712 2024-11-11T04:34:49,713 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:49,713 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:49,713 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:49,713 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:49,713 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:34:49,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35251 is added to blk_1073741889_1072 (size=1045) 2024-11-11T04:34:49,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741889_1072 (size=1045) 2024-11-11T04:34:49,926 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T04:34:49,939 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:49,940 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:49,940 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:49,940 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:49,940 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:49,941 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:49,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:49,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:50,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:50,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:50,630 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@61db27c9 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-454738336-172.17.0.2-1731299645786:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:46201,null,null]) java.net.ConnectException: Call From a7bef91497aa/172.17.0.2 to localhost:42071 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-11T04:34:50,734 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/WALs/a7bef91497aa,34363,1731299646472/a7bef91497aa%2C34363%2C1731299646472.1731299646634 to hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/oldWALs/a7bef91497aa%2C34363%2C1731299646472.1731299646634 2024-11-11T04:34:50,738 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/MasterData/oldWALs/a7bef91497aa%2C34363%2C1731299646472.1731299646634 to hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/oldWALs/a7bef91497aa%2C34363%2C1731299646472.1731299646634$masterlocalwal$ 2024-11-11T04:34:50,738 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T04:34:50,738 INFO [M:0;a7bef91497aa:34363 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T04:34:50,738 INFO [M:0;a7bef91497aa:34363 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34363 2024-11-11T04:34:50,739 INFO [M:0;a7bef91497aa:34363 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T04:34:50,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:34:50,841 INFO [M:0;a7bef91497aa:34363 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T04:34:50,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34363-0x101959b91600000, quorum=127.0.0.1:51151, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:34:50,843 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@69aa82e5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:50,843 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6fc4b42f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:34:50,843 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:34:50,844 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69a3dde8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:34:50,844 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1191c470{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.log.dir/,STOPPED} 2024-11-11T04:34:50,845 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4f50cae0 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-454738336-172.17.0.2-1731299645786:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:46201,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:42071 , LocalHost:localPort a7bef91497aa/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-11T04:34:50,845 WARN [BP-454738336-172.17.0.2-1731299645786 heartbeating to localhost/127.0.0.1:41327 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:34:50,845 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T04:34:50,845 WARN [BP-454738336-172.17.0.2-1731299645786 heartbeating to localhost/127.0.0.1:41327 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-454738336-172.17.0.2-1731299645786 (Datanode Uuid 9cbb48e6-377e-490c-b963-e735d7cbd305) service to localhost/127.0.0.1:41327 2024-11-11T04:34:50,845 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:34:50,846 ERROR [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4f50cae0 {}] datanode.DataNode(1743): Cannot find BPOfferService for reporting block received for bpid=BP-454738336-172.17.0.2-1731299645786 2024-11-11T04:34:50,846 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data3/current/BP-454738336-172.17.0.2-1731299645786 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:50,846 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data4/current/BP-454738336-172.17.0.2-1731299645786 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:50,847 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:34:50,847 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4f50cae0 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-454738336-172.17.0.2-1731299645786:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:35251,null,null]) java.io.IOException: No block pool offer service for bpid=BP-454738336-172.17.0.2-1731299645786 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:50,847 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4f50cae0 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-454738336-172.17.0.2-1731299645786:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:46201,null,null]) java.io.IOException: No block pool offer service for bpid=BP-454738336-172.17.0.2-1731299645786 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:34:50,847 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4f50cae0 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-454738336-172.17.0.2-1731299645786:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:35251,null,null], DatanodeInfoWithStorage[127.0.0.1:46201,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-454738336-172.17.0.2-1731299645786:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:35251,null,null], DatanodeInfoWithStorage[127.0.0.1:46201,null,null]] 2024-11-11T04:34:50,849 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@458e5bcf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:50,849 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6b75ba45{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:34:50,849 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:34:50,849 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3986ff43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:34:50,849 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fbc343d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.log.dir/,STOPPED} 2024-11-11T04:34:50,850 WARN [BP-454738336-172.17.0.2-1731299645786 heartbeating to localhost/127.0.0.1:41327 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:34:50,851 WARN [BP-454738336-172.17.0.2-1731299645786 heartbeating to localhost/127.0.0.1:41327 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-454738336-172.17.0.2-1731299645786 (Datanode Uuid c22f2d70-f590-45e9-9ca1-b877c52e1b27) service to localhost/127.0.0.1:41327 2024-11-11T04:34:50,851 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data7/current/BP-454738336-172.17.0.2-1731299645786 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:50,851 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/cluster_8b97c0a2-b56e-2508-7199-0bcaa1af3817/data/data8/current/BP-454738336-172.17.0.2-1731299645786 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:34:50,852 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:34:50,852 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T04:34:50,852 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:34:50,857 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7cd2a640{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T04:34:50,858 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64b7b556{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:34:50,858 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:34:50,858 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ed3a961{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:34:50,858 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a488aac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.log.dir/,STOPPED} 2024-11-11T04:34:50,866 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T04:34:50,893 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T04:34:50,901 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41327 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41327 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially 
hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fac14bef800.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41327 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41327 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:46393 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:41327 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46393 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41327 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:41327 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41327 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41327 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41327 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fac14bef800.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41327 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=452 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=7 (was 9), ProcessCount=11 (was 11), AvailableMemoryMB=6778 (was 7211) 2024-11-11T04:34:50,908 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=7, ProcessCount=11, AvailableMemoryMB=6778 2024-11-11T04:34:50,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T04:34:50,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.log.dir so I do NOT create it in target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b 2024-11-11T04:34:50,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d549239d-bab0-c761-e004-d9edc084ad03/hadoop.tmp.dir so I do NOT create it in target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b 2024-11-11T04:34:50,908 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16, deleteOnExit=true 2024-11-11T04:34:50,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T04:34:50,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/test.cache.data in system properties and HBase conf 2024-11-11T04:34:50,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T04:34:50,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.log.dir in system properties and HBase conf 2024-11-11T04:34:50,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T04:34:50,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T04:34:50,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T04:34:50,909 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-11T04:34:50,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T04:34:50,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T04:34:50,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T04:34:50,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T04:34:50,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T04:34:50,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T04:34:50,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T04:34:50,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T04:34:50,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T04:34:50,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/nfs.dump.dir in system properties and HBase conf 2024-11-11T04:34:50,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/java.io.tmpdir in system properties and HBase conf 2024-11-11T04:34:50,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T04:34:50,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T04:34:50,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T04:34:50,923 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T04:34:50,988 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:34:50,993 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:34:50,994 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:34:50,994 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:34:50,994 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:34:50,996 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:34:50,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6495f923{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:34:50,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d60493b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:34:51,109 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e84569b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/java.io.tmpdir/jetty-localhost-46351-hadoop-hdfs-3_4_1-tests_jar-_-any-2418338150050597181/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T04:34:51,109 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@ea144a0{HTTP/1.1, (http/1.1)}{localhost:46351} 2024-11-11T04:34:51,109 INFO [Time-limited test {}] server.Server(415): Started @148911ms 2024-11-11T04:34:51,122 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T04:34:51,185 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:34:51,188 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:34:51,188 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:34:51,188 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:34:51,188 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:34:51,189 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78e57521{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:34:51,189 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@168478e8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:34:51,300 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7096145a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/java.io.tmpdir/jetty-localhost-33197-hadoop-hdfs-3_4_1-tests_jar-_-any-4623795663989641870/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:51,301 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13bf27af{HTTP/1.1, (http/1.1)}{localhost:33197} 2024-11-11T04:34:51,301 INFO [Time-limited test {}] server.Server(415): Started @149103ms 2024-11-11T04:34:51,303 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:34:51,331 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:34:51,334 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:34:51,335 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:34:51,335 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:34:51,335 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T04:34:51,336 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c6cb23c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:34:51,336 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@365c2477{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:34:51,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-11T04:34:51,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:51,396 WARN [Thread-1198 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/data/data2/current/BP-1384133828-172.17.0.2-1731299690940/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:51,396 WARN [Thread-1197 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/data/data1/current/BP-1384133828-172.17.0.2-1731299690940/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:51,413 WARN [Thread-1176 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:34:51,415 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7c9341be46f2d649 with lease ID 0x479abea76b6abc: Processing first storage report for DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb from datanode DatanodeRegistration(127.0.0.1:43511, datanodeUuid=5bdc0d2d-5e97-4e14-9cd6-3d3c6eabc44e, infoPort=42625, infoSecurePort=0, ipcPort=41891, storageInfo=lv=-57;cid=testClusterID;nsid=1713708580;c=1731299690940) 2024-11-11T04:34:51,415 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7c9341be46f2d649 with lease ID 0x479abea76b6abc: from storage DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb node DatanodeRegistration(127.0.0.1:43511, datanodeUuid=5bdc0d2d-5e97-4e14-9cd6-3d3c6eabc44e, infoPort=42625, infoSecurePort=0, ipcPort=41891, storageInfo=lv=-57;cid=testClusterID;nsid=1713708580;c=1731299690940), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:34:51,416 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7c9341be46f2d649 with lease ID 0x479abea76b6abc: Processing first storage report for DS-a09f1d5d-f986-4c2b-b5fc-48c34034172a from datanode DatanodeRegistration(127.0.0.1:43511, datanodeUuid=5bdc0d2d-5e97-4e14-9cd6-3d3c6eabc44e, infoPort=42625, infoSecurePort=0, ipcPort=41891, storageInfo=lv=-57;cid=testClusterID;nsid=1713708580;c=1731299690940) 2024-11-11T04:34:51,416 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7c9341be46f2d649 with lease ID 0x479abea76b6abc: from storage DS-a09f1d5d-f986-4c2b-b5fc-48c34034172a node DatanodeRegistration(127.0.0.1:43511, datanodeUuid=5bdc0d2d-5e97-4e14-9cd6-3d3c6eabc44e, infoPort=42625, infoSecurePort=0, ipcPort=41891, storageInfo=lv=-57;cid=testClusterID;nsid=1713708580;c=1731299690940), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:34:51,450 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30a928dc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/java.io.tmpdir/jetty-localhost-41611-hadoop-hdfs-3_4_1-tests_jar-_-any-256889543094584124/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:34:51,450 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@341f9f9e{HTTP/1.1, (http/1.1)}{localhost:41611} 2024-11-11T04:34:51,450 INFO [Time-limited test {}] server.Server(415): Started @149252ms 2024-11-11T04:34:51,451 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
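
Editor's note: the block reports and datanode registrations above come from an in-process HDFS mini-cluster that the test harness brings up before the HBase cluster itself. As a rough illustration only (the harness here actually goes through HBaseTestingUtil, which wraps this), the sketch below starts a two-datanode MiniDFSCluster with Hadoop's test API; the mkdirs path is hypothetical and everything else uses defaults.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Two datanodes, matching the two storage reports in the log above.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)
            .build();
        try {
          // Blocks until the datanodes have registered and sent their first block reports.
          cluster.waitActive();
          FileSystem fs = cluster.getFileSystem();
          fs.mkdirs(new Path("/user/jenkins"));   // hypothetical path, just to exercise the FS
          System.out.println("NameNode RPC port: " + cluster.getNameNodePort());
        } finally {
          cluster.shutdown();
        }
      }
    }
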
2024-11-11T04:34:51,541 WARN [Thread-1223 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/data/data3/current/BP-1384133828-172.17.0.2-1731299690940/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:51,541 WARN [Thread-1224 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/data/data4/current/BP-1384133828-172.17.0.2-1731299690940/current, will proceed with Du for space computation calculation, 2024-11-11T04:34:51,557 WARN [Thread-1212 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T04:34:51,560 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1f872a8491e4c20d with lease ID 0x479abea76b6abd: Processing first storage report for DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1 from datanode DatanodeRegistration(127.0.0.1:44729, datanodeUuid=63425e5e-0ac8-4381-82ea-5b5425f0f552, infoPort=33759, infoSecurePort=0, ipcPort=42607, storageInfo=lv=-57;cid=testClusterID;nsid=1713708580;c=1731299690940) 2024-11-11T04:34:51,560 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f872a8491e4c20d with lease ID 0x479abea76b6abd: from storage DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1 node DatanodeRegistration(127.0.0.1:44729, datanodeUuid=63425e5e-0ac8-4381-82ea-5b5425f0f552, infoPort=33759, infoSecurePort=0, ipcPort=42607, storageInfo=lv=-57;cid=testClusterID;nsid=1713708580;c=1731299690940), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:34:51,560 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1f872a8491e4c20d with lease ID 0x479abea76b6abd: Processing first storage report for DS-60b61927-4167-482c-8710-30f94aae55cf from datanode DatanodeRegistration(127.0.0.1:44729, datanodeUuid=63425e5e-0ac8-4381-82ea-5b5425f0f552, infoPort=33759, infoSecurePort=0, ipcPort=42607, storageInfo=lv=-57;cid=testClusterID;nsid=1713708580;c=1731299690940) 2024-11-11T04:34:51,560 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f872a8491e4c20d with lease ID 0x479abea76b6abd: from storage DS-60b61927-4167-482c-8710-30f94aae55cf node DatanodeRegistration(127.0.0.1:44729, datanodeUuid=63425e5e-0ac8-4381-82ea-5b5425f0f552, infoPort=33759, infoSecurePort=0, ipcPort=42607, storageInfo=lv=-57;cid=testClusterID;nsid=1713708580;c=1731299690940), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:34:51,574 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b 2024-11-11T04:34:51,577 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/zookeeper_0, clientPort=61413, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T04:34:51,577 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61413 2024-11-11T04:34:51,578 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:51,579 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:51,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741825_1001 (size=7) 2024-11-11T04:34:51,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44729 is added to blk_1073741825_1001 (size=7) 2024-11-11T04:34:51,589 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94 with version=8 2024-11-11T04:34:51,589 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/hbase-staging 2024-11-11T04:34:51,591 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a7bef91497aa:0 server-side Connection retries=45 2024-11-11T04:34:51,591 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:51,591 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:51,591 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:34:51,591 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:51,591 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:34:51,591 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T04:34:51,591 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:34:51,592 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45233 2024-11-11T04:34:51,593 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45233 connecting to ZooKeeper ensemble=127.0.0.1:61413 2024-11-11T04:34:51,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:452330x0, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:34:51,599 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45233-0x101959c41a00000 connected 2024-11-11T04:34:51,614 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:51,615 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:51,617 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:34:51,617 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94, hbase.cluster.distributed=false 2024-11-11T04:34:51,619 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:34:51,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45233 2024-11-11T04:34:51,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45233 2024-11-11T04:34:51,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45233 2024-11-11T04:34:51,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45233 2024-11-11T04:34:51,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45233 2024-11-11T04:34:51,635 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a7bef91497aa:0 server-side Connection retries=45 2024-11-11T04:34:51,635 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:51,635 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:51,635 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:34:51,635 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:34:51,635 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:34:51,635 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T04:34:51,635 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:34:51,636 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36711 2024-11-11T04:34:51,637 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36711 connecting to ZooKeeper ensemble=127.0.0.1:61413 2024-11-11T04:34:51,637 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:51,639 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:51,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:367110x0, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:34:51,643 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:367110x0, quorum=127.0.0.1:61413, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:34:51,643 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36711-0x101959c41a00001 connected 2024-11-11T04:34:51,643 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T04:34:51,644 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T04:34:51,645 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T04:34:51,645 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:34:51,646 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36711 2024-11-11T04:34:51,646 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36711 2024-11-11T04:34:51,646 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36711 2024-11-11T04:34:51,647 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36711 2024-11-11T04:34:51,647 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36711 2024-11-11T04:34:51,658 
DEBUG [M:0;a7bef91497aa:45233 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a7bef91497aa:45233 2024-11-11T04:34:51,659 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a7bef91497aa,45233,1731299691590 2024-11-11T04:34:51,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:34:51,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:34:51,662 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a7bef91497aa,45233,1731299691590 2024-11-11T04:34:51,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T04:34:51,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:51,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:51,664 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T04:34:51,664 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a7bef91497aa,45233,1731299691590 from backup master directory 2024-11-11T04:34:51,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a7bef91497aa,45233,1731299691590 2024-11-11T04:34:51,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:34:51,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:34:51,666 WARN [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
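
Editor's note: the ZKWatcher/ZKUtil lines above show the master and regionserver connecting to the mini ZooKeeper ensemble on 127.0.0.1:61413 and arming watchers on znodes such as /hbase/running and /hbase/master before those znodes exist. A minimal sketch of the same idea with the plain ZooKeeper client follows; HBase's own ZKWatcher/RecoverableZooKeeper wrappers add retry and event-dispatch logic omitted here. The connect string and znode path are taken from the log, the rest is illustrative.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZkWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // 61413 is the mini-ZK client port from the log above; adjust for another ensemble.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:61413", 30000, (WatchedEvent e) -> {
          if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();
        // exists() with watch=true arms a watcher even if the znode is not there yet,
        // which is what "Set watcher on znode that does not yet exist" refers to.
        Stat stat = zk.exists("/hbase/running", true);
        System.out.println("/hbase/running " + (stat == null ? "absent, watch armed" : "present"));
        zk.close();
      }
    }
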
2024-11-11T04:34:51,666 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a7bef91497aa,45233,1731299691590 2024-11-11T04:34:51,671 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/hbase.id] with ID: 2b09363f-62e0-4558-a43c-748f9c7b97a7 2024-11-11T04:34:51,671 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/.tmp/hbase.id 2024-11-11T04:34:51,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741826_1002 (size=42) 2024-11-11T04:34:51,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44729 is added to blk_1073741826_1002 (size=42) 2024-11-11T04:34:51,678 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/.tmp/hbase.id]:[hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/hbase.id] 2024-11-11T04:34:51,689 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:51,689 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T04:34:51,690 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
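
Editor's note: the cluster-ID steps above (write hbase.id to a .tmp location, then move it to its final path) are the usual write-then-rename pattern for publishing a small file on HDFS without readers ever seeing a partial write. A generic sketch of that pattern with the FileSystem API follows; the paths and helper name are hypothetical, and HBase's FSUtils adds permission and retry handling not shown. The UUID string is the cluster ID reported in the log.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      // Write content to a temporary file, then rename it into place so readers never
      // observe a half-written file (the .tmp/hbase.id -> hbase.id move in the log above).
      static void writeAtomically(FileSystem fs, Path target, String content) throws Exception {
        Path tmp = new Path(target.getParent(), ".tmp-" + target.getName());
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(content.getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, target)) {
          throw new IllegalStateException("rename failed: " + tmp + " -> " + target);
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // fs.defaultFS decides which filesystem this hits
        FileSystem fs = FileSystem.get(conf);
        writeAtomically(fs, new Path("/tmp/cluster-id-sketch/hbase.id"),
            "2b09363f-62e0-4558-a43c-748f9c7b97a7");
      }
    }
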
2024-11-11T04:34:51,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:51,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:51,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741827_1003 (size=196) 2024-11-11T04:34:51,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44729 is added to blk_1073741827_1003 (size=196) 2024-11-11T04:34:51,700 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T04:34:51,701 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T04:34:51,701 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:34:51,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741828_1004 (size=1189) 2024-11-11T04:34:51,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44729 is added to blk_1073741828_1004 (size=1189) 2024-11-11T04:34:52,109 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store 2024-11-11T04:34:52,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44729 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:34:52,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:34:52,116 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:34:52,117 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T04:34:52,117 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:52,117 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:52,117 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T04:34:52,117 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:34:52,117 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
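
Editor's note: the master:store descriptor logged above declares four column families (info, proc, rs, state) with per-family VERSIONS, BLOOMFILTER, BLOCKSIZE and DATA_BLOCK_ENCODING settings. For reference, an equivalent descriptor for two of those families can be assembled with the public client API roughly as below; the table name is a placeholder, since master:store is an internal local region and is not created through this API.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        // Placeholder table name; only the per-family settings mirror the logged descriptor.
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("descriptor_sketch"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                 // VERSIONS => '3'
                .setInMemory(true)                                 // IN_MEMORY => 'true'
                .setBlocksize(8192)                                // BLOCKSIZE => 8 KB
                .setBloomFilterType(BloomType.ROWCOL)              // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)                                 // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)                 // BLOOMFILTER => 'ROW'
                .build())
            .build();
        System.out.println(td);
      }
    }
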
2024-11-11T04:34:52,117 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731299692117Disabling compacts and flushes for region at 1731299692117Disabling writes for close at 1731299692117Writing region close event to WAL at 1731299692117Closed at 1731299692117 2024-11-11T04:34:52,118 WARN [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/.initializing 2024-11-11T04:34:52,118 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/WALs/a7bef91497aa,45233,1731299691590 2024-11-11T04:34:52,120 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C45233%2C1731299691590, suffix=, logDir=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/WALs/a7bef91497aa,45233,1731299691590, archiveDir=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/oldWALs, maxLogs=10 2024-11-11T04:34:52,120 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C45233%2C1731299691590.1731299692120 2024-11-11T04:34:52,125 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/WALs/a7bef91497aa,45233,1731299691590/a7bef91497aa%2C45233%2C1731299691590.1731299692120 2024-11-11T04:34:52,126 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42625:42625),(127.0.0.1/127.0.0.1:33759:33759)] 2024-11-11T04:34:52,127 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:34:52,127 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:34:52,127 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:52,127 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:52,128 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:52,130 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T04:34:52,130 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:52,130 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:52,130 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:52,131 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T04:34:52,131 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:52,132 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:34:52,132 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:52,133 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T04:34:52,133 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:52,133 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:34:52,133 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:52,134 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T04:34:52,134 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:52,134 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:34:52,135 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:52,135 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:52,135 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:52,137 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:52,137 DEBUG [master/a7bef91497aa:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:52,137 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T04:34:52,138 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:34:52,140 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:34:52,140 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809189, jitterRate=0.028938084840774536}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T04:34:52,141 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731299692127Initializing all the Stores at 1731299692128 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299692128Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299692128Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299692128Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299692128Cleaning up temporary data from old regions at 1731299692137 (+9 ms)Region opened successfully at 1731299692141 (+4 ms) 2024-11-11T04:34:52,141 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T04:34:52,144 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f42d117, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7bef91497aa/172.17.0.2:0 2024-11-11T04:34:52,145 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T04:34:52,145 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T04:34:52,145 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T04:34:52,145 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T04:34:52,146 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T04:34:52,146 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T04:34:52,146 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T04:34:52,148 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T04:34:52,149 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T04:34:52,151 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T04:34:52,151 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T04:34:52,152 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T04:34:52,154 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T04:34:52,154 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T04:34:52,155 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T04:34:52,156 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T04:34:52,157 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T04:34:52,159 DEBUG 
[master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T04:34:52,161 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T04:34:52,163 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T04:34:52,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:34:52,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:34:52,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:52,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:52,165 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a7bef91497aa,45233,1731299691590, sessionid=0x101959c41a00000, setting cluster-up flag (Was=false) 2024-11-11T04:34:52,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:52,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:52,174 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T04:34:52,175 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7bef91497aa,45233,1731299691590 2024-11-11T04:34:52,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:52,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:52,183 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T04:34:52,184 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7bef91497aa,45233,1731299691590 2024-11-11T04:34:52,185 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T04:34:52,187 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T04:34:52,187 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T04:34:52,187 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T04:34:52,187 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a7bef91497aa,45233,1731299691590 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T04:34:52,189 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:34:52,189 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:34:52,189 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:34:52,189 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:34:52,189 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a7bef91497aa:0, corePoolSize=10, maxPoolSize=10 2024-11-11T04:34:52,189 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:52,189 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a7bef91497aa:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:34:52,189 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a7bef91497aa:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T04:34:52,190 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731299722190 2024-11-11T04:34:52,190 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T04:34:52,190 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T04:34:52,190 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T04:34:52,190 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T04:34:52,190 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T04:34:52,190 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T04:34:52,190 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,191 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:34:52,191 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T04:34:52,191 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T04:34:52,191 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T04:34:52,191 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T04:34:52,191 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T04:34:52,191 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T04:34:52,191 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299692191,5,FailOnTimeoutGroup] 2024-11-11T04:34:52,191 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299692191,5,FailOnTimeoutGroup] 2024-11-11T04:34:52,192 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,192 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T04:34:52,192 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,192 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,192 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:52,192 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T04:34:52,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44729 is added to blk_1073741831_1007 (size=1321) 2024-11-11T04:34:52,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741831_1007 (size=1321) 2024-11-11T04:34:52,200 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T04:34:52,201 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94 2024-11-11T04:34:52,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:34:52,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44729 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:34:52,208 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:34:52,209 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:34:52,210 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:34:52,210 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:52,210 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:52,211 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T04:34:52,212 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T04:34:52,212 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:52,212 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:52,212 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:34:52,213 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:34:52,214 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:52,214 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:52,214 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T04:34:52,215 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:34:52,215 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:52,216 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:52,216 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T04:34:52,216 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740 2024-11-11T04:34:52,217 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740 2024-11-11T04:34:52,218 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T04:34:52,218 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T04:34:52,219 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
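
The entries above show two related knobs being left at their defaults: the master keeps "Reopening regions with very high storeFileRefCount" disabled because no threshold is set for hbase.regions.recovery.store.file.ref.count, and FlushLargeStoresPolicy falls back to region.getMemStoreFlushHeapSize divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not present in the hbase:meta descriptor. The sketch below shows where each knob lives (site configuration vs. table descriptor); the threshold and bound values are illustrative assumptions, not taken from this run, and the table/family names are hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushAndRecoveryTuningSketch {

      // Site-level threshold checked at master startup; the run above leaves it unset, so
      // reopening of regions with very high storeFileRefCount stays disabled.
      static Configuration withStoreFileRefCountThreshold() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 256); // illustrative value
        return conf;
      }

      // Table-descriptor-level lower bound for per-column-family flushes; when absent (as for
      // hbase:meta above) FlushLargeStoresPolicy derives it from the memstore flush size divided
      // by the number of column families.
      static TableDescriptor withFlushLowerBound() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", "16777216") // 16 MB
            .build();
      }
    }
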
2024-11-11T04:34:52,220 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T04:34:52,222 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:34:52,222 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=746163, jitterRate=-0.05120508372783661}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T04:34:52,223 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731299692208Initializing all the Stores at 1731299692208Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299692208Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299692209 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299692209Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299692209Cleaning up temporary data from old regions at 1731299692218 (+9 ms)Region opened successfully at 1731299692223 (+5 ms) 2024-11-11T04:34:52,223 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:34:52,223 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T04:34:52,223 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T04:34:52,223 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:34:52,223 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:34:52,223 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T04:34:52,224 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731299692223Disabling compacts and flushes for region at 1731299692223Disabling writes for close at 1731299692223Writing region close 
event to WAL at 1731299692223Closed at 1731299692223 2024-11-11T04:34:52,225 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:34:52,225 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T04:34:52,225 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T04:34:52,226 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T04:34:52,227 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T04:34:52,249 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.HRegionServer(746): ClusterId : 2b09363f-62e0-4558-a43c-748f9c7b97a7 2024-11-11T04:34:52,249 DEBUG [RS:0;a7bef91497aa:36711 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T04:34:52,251 DEBUG [RS:0;a7bef91497aa:36711 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T04:34:52,251 DEBUG [RS:0;a7bef91497aa:36711 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T04:34:52,254 DEBUG [RS:0;a7bef91497aa:36711 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T04:34:52,254 DEBUG [RS:0;a7bef91497aa:36711 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7404f611, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7bef91497aa/172.17.0.2:0 2024-11-11T04:34:52,266 DEBUG [RS:0;a7bef91497aa:36711 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a7bef91497aa:36711 2024-11-11T04:34:52,266 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T04:34:52,266 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T04:34:52,266 DEBUG [RS:0;a7bef91497aa:36711 {}] regionserver.HRegionServer(832): About to register with Master. 
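
The SteppingSplitPolicy entry above reports desiredMaxFileSize=746163 with jitterRate=-0.05120508372783661, and the second open of the same region later in this section reports 718598 with jitterRate=-0.0862562358379364. Both values are consistent with the 786432 bytes that the TableDescriptorChecker warning at the end of this section attributes to hbase.hregion.max.filesize, with the jitter applied as maxFileSize + (long)(maxFileSize * jitterRate). The check below is offered as an observation about the numbers in this log, not a claim about HBase internals.

    public class SplitSizeJitterCheck {
      public static void main(String[] args) {
        long maxFileSize = 786_432L;            // value flagged by TableDescriptorChecker below
        double jitter1 = -0.05120508372783661;  // from the master-side open above
        double jitter2 = -0.0862562358379364;   // from the region-server-side open further down
        System.out.println(maxFileSize + (long) (maxFileSize * jitter1)); // 746163, as logged above
        System.out.println(maxFileSize + (long) (maxFileSize * jitter2)); // 718598, as logged below
      }
    }
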
2024-11-11T04:34:52,267 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.HRegionServer(2659): reportForDuty to master=a7bef91497aa,45233,1731299691590 with port=36711, startcode=1731299691635 2024-11-11T04:34:52,267 DEBUG [RS:0;a7bef91497aa:36711 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T04:34:52,269 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42019, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T04:34:52,269 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45233 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a7bef91497aa,36711,1731299691635 2024-11-11T04:34:52,270 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45233 {}] master.ServerManager(517): Registering regionserver=a7bef91497aa,36711,1731299691635 2024-11-11T04:34:52,271 DEBUG [RS:0;a7bef91497aa:36711 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94 2024-11-11T04:34:52,271 DEBUG [RS:0;a7bef91497aa:36711 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45989 2024-11-11T04:34:52,271 DEBUG [RS:0;a7bef91497aa:36711 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T04:34:52,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:34:52,273 DEBUG [RS:0;a7bef91497aa:36711 {}] zookeeper.ZKUtil(111): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a7bef91497aa,36711,1731299691635 2024-11-11T04:34:52,273 WARN [RS:0;a7bef91497aa:36711 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T04:34:52,274 INFO [RS:0;a7bef91497aa:36711 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:34:52,274 DEBUG [RS:0;a7bef91497aa:36711 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635 2024-11-11T04:34:52,274 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a7bef91497aa,36711,1731299691635] 2024-11-11T04:34:52,277 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T04:34:52,280 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T04:34:52,280 INFO [RS:0;a7bef91497aa:36711 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T04:34:52,280 INFO [RS:0;a7bef91497aa:36711 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
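
The MemStoreFlusher entry above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, which is 0.95 of the limit (880 × 0.95 = 836). A minimal sketch of the two site properties conventionally used to control these bounds is shown below; the key names are standard HBase configuration, but treat the specific fractions as illustrative rather than read back from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class GlobalMemStoreLimitSketch {
      static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap usable by all memstores combined.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of the limit; 0.95 matches 836 M / 880 M in the entry above.
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        return conf;
      }
    }
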
2024-11-11T04:34:52,282 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T04:34:52,283 INFO [RS:0;a7bef91497aa:36711 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T04:34:52,283 INFO [RS:0;a7bef91497aa:36711 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,283 DEBUG [RS:0;a7bef91497aa:36711 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:52,283 DEBUG [RS:0;a7bef91497aa:36711 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:52,283 DEBUG [RS:0;a7bef91497aa:36711 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:52,283 DEBUG [RS:0;a7bef91497aa:36711 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:52,283 DEBUG [RS:0;a7bef91497aa:36711 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:52,283 DEBUG [RS:0;a7bef91497aa:36711 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a7bef91497aa:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:34:52,283 DEBUG [RS:0;a7bef91497aa:36711 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:52,283 DEBUG [RS:0;a7bef91497aa:36711 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:52,283 DEBUG [RS:0;a7bef91497aa:36711 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:52,283 DEBUG [RS:0;a7bef91497aa:36711 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:52,283 DEBUG [RS:0;a7bef91497aa:36711 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:52,283 DEBUG [RS:0;a7bef91497aa:36711 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:34:52,283 DEBUG [RS:0;a7bef91497aa:36711 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a7bef91497aa:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:34:52,283 DEBUG [RS:0;a7bef91497aa:36711 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:34:52,284 INFO [RS:0;a7bef91497aa:36711 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-11T04:34:52,284 INFO [RS:0;a7bef91497aa:36711 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,284 INFO [RS:0;a7bef91497aa:36711 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,284 INFO [RS:0;a7bef91497aa:36711 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,284 INFO [RS:0;a7bef91497aa:36711 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,284 INFO [RS:0;a7bef91497aa:36711 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,36711,1731299691635-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:34:52,299 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T04:34:52,299 INFO [RS:0;a7bef91497aa:36711 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,36711,1731299691635-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,299 INFO [RS:0;a7bef91497aa:36711 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,299 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.Replication(171): a7bef91497aa,36711,1731299691635 started 2024-11-11T04:34:52,313 INFO [RS:0;a7bef91497aa:36711 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,313 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.HRegionServer(1482): Serving as a7bef91497aa,36711,1731299691635, RpcServer on a7bef91497aa/172.17.0.2:36711, sessionid=0x101959c41a00001 2024-11-11T04:34:52,313 DEBUG [RS:0;a7bef91497aa:36711 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T04:34:52,313 DEBUG [RS:0;a7bef91497aa:36711 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a7bef91497aa,36711,1731299691635 2024-11-11T04:34:52,313 DEBUG [RS:0;a7bef91497aa:36711 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7bef91497aa,36711,1731299691635' 2024-11-11T04:34:52,313 DEBUG [RS:0;a7bef91497aa:36711 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T04:34:52,314 DEBUG [RS:0;a7bef91497aa:36711 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T04:34:52,314 DEBUG [RS:0;a7bef91497aa:36711 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T04:34:52,314 DEBUG [RS:0;a7bef91497aa:36711 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T04:34:52,314 DEBUG [RS:0;a7bef91497aa:36711 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a7bef91497aa,36711,1731299691635 2024-11-11T04:34:52,314 DEBUG [RS:0;a7bef91497aa:36711 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7bef91497aa,36711,1731299691635' 2024-11-11T04:34:52,314 DEBUG [RS:0;a7bef91497aa:36711 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T04:34:52,315 DEBUG 
[RS:0;a7bef91497aa:36711 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T04:34:52,315 DEBUG [RS:0;a7bef91497aa:36711 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T04:34:52,315 INFO [RS:0;a7bef91497aa:36711 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T04:34:52,315 INFO [RS:0;a7bef91497aa:36711 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T04:34:52,377 WARN [a7bef91497aa:45233 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-11T04:34:52,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:52,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:52,417 INFO [RS:0;a7bef91497aa:36711 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C36711%2C1731299691635, suffix=, logDir=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635, archiveDir=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/oldWALs, maxLogs=32 2024-11-11T04:34:52,418 INFO [RS:0;a7bef91497aa:36711 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C36711%2C1731299691635.1731299692417 2024-11-11T04:34:52,424 INFO [RS:0;a7bef91497aa:36711 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299692417 2024-11-11T04:34:52,426 DEBUG [RS:0;a7bef91497aa:36711 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42625:42625),(127.0.0.1/127.0.0.1:33759:33759)] 2024-11-11T04:34:52,628 DEBUG [a7bef91497aa:45233 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T04:34:52,628 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a7bef91497aa,36711,1731299691635 2024-11-11T04:34:52,630 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7bef91497aa,36711,1731299691635, state=OPENING 2024-11-11T04:34:52,632 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T04:34:52,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:52,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:34:52,634 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T04:34:52,634 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:34:52,634 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:34:52,634 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a7bef91497aa,36711,1731299691635}] 2024-11-11T04:34:52,788 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T04:34:52,789 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50327, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T04:34:52,794 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T04:34:52,794 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:34:52,795 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C36711%2C1731299691635.meta, suffix=.meta, logDir=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635, archiveDir=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/oldWALs, maxLogs=32 2024-11-11T04:34:52,796 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C36711%2C1731299691635.meta.1731299692796.meta 2024-11-11T04:34:52,801 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.meta.1731299692796.meta 2024-11-11T04:34:52,807 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33759:33759),(127.0.0.1/127.0.0.1:42625:42625)] 2024-11-11T04:34:52,808 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 
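
The WAL entries above show FSHLogProvider being instantiated and WALs configured with blocksize=256 MB, rollsize=128 MB (i.e. rolling at half the block size) and maxLogs=32. Below is a sketch of the site properties typically behind those numbers; the key names come from standard HBase configuration and the values simply mirror what is logged above, so treat the mapping as an illustration rather than a readout of this cluster's hbase-site.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSketch {
      static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");   // selects FSHLogProvider, as instantiated above
        conf.setInt("hbase.regionserver.maxlogs", 32);  // maxLogs=32 in the WAL configuration entries
        // Roll at a fraction of the WAL block size; 0.5 matches rollsize=128 MB of blocksize=256 MB above.
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        return conf;
      }
    }
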
2024-11-11T04:34:52,808 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T04:34:52,808 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T04:34:52,808 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-11T04:34:52,809 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T04:34:52,809 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:34:52,809 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T04:34:52,809 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T04:34:52,810 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:34:52,811 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:34:52,811 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:52,811 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:52,811 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T04:34:52,812 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T04:34:52,812 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:52,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:52,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:34:52,813 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:34:52,813 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:52,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:52,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T04:34:52,814 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:34:52,814 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:52,815 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:34:52,815 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T04:34:52,816 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740 2024-11-11T04:34:52,817 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740 2024-11-11T04:34:52,818 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T04:34:52,818 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T04:34:52,818 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
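
The open of hbase:meta above loads org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from the table descriptor (the coprocessor$1 attribute printed with the descriptor earlier in this section) and registers MultiRowMutationService for the region. For comparison, here is a minimal sketch of attaching the same endpoint to a user table descriptor; the table and family names are hypothetical.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorAttachSketch {
      static TableDescriptor withMultiRowMutation() throws IOException {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Registers the endpoint so the region exposes MultiRowMutationService,
            // as the region server logs above for hbase:meta.
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
      }
    }
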
2024-11-11T04:34:52,819 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T04:34:52,820 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=718598, jitterRate=-0.0862562358379364}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T04:34:52,820 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T04:34:52,821 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731299692809Writing region info on filesystem at 1731299692809Initializing all the Stores at 1731299692810 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299692810Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299692810Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299692810Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299692810Cleaning up temporary data from old regions at 1731299692818 (+8 ms)Running coprocessor post-open hooks at 1731299692820 (+2 ms)Region opened successfully at 1731299692821 (+1 ms) 2024-11-11T04:34:52,822 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731299692787 2024-11-11T04:34:52,824 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T04:34:52,824 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T04:34:52,825 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=a7bef91497aa,36711,1731299691635 2024-11-11T04:34:52,826 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7bef91497aa,36711,1731299691635, state=OPEN 2024-11-11T04:34:52,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:34:52,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:34:52,831 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a7bef91497aa,36711,1731299691635 2024-11-11T04:34:52,831 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:34:52,831 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:34:52,833 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T04:34:52,834 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a7bef91497aa,36711,1731299691635 in 197 msec 2024-11-11T04:34:52,836 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T04:34:52,836 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 609 msec 2024-11-11T04:34:52,837 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:34:52,837 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T04:34:52,838 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T04:34:52,838 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7bef91497aa,36711,1731299691635, seqNum=-1] 2024-11-11T04:34:52,839 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:34:52,840 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42299, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:34:52,845 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 658 msec 2024-11-11T04:34:52,845 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731299692845, completionTime=-1 2024-11-11T04:34:52,845 INFO 
[master/a7bef91497aa:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T04:34:52,845 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T04:34:52,847 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T04:34:52,847 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731299752847 2024-11-11T04:34:52,847 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731299812847 2024-11-11T04:34:52,847 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-11T04:34:52,848 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,45233,1731299691590-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,848 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,45233,1731299691590-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,848 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,45233,1731299691590-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,848 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a7bef91497aa:45233, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,848 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,848 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,850 DEBUG [master/a7bef91497aa:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T04:34:52,852 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.186sec 2024-11-11T04:34:52,852 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T04:34:52,852 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T04:34:52,852 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T04:34:52,852 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
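
The master reports quota support disabled and slow/large request logging to the hbase:slowlog system table disabled in the entries above. A sketch of the site flags that would turn them on follows; hbase.quota.enabled is a standard property, while the slowlog key name here is an assumption and should be verified against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaAndSlowLogSketch {
      static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.quota.enabled", true); // MasterQuotaManager logs "Quota support disabled" above
        // Assumed key: persists slow/large request records to the hbase:slowlog system table.
        conf.setBoolean("hbase.regionserver.slowlog.systable.enabled", true);
        return conf;
      }
    }
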
2024-11-11T04:34:52,852 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T04:34:52,852 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,45233,1731299691590-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:34:52,852 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,45233,1731299691590-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T04:34:52,854 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T04:34:52,854 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T04:34:52,854 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,45233,1731299691590-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:34:52,949 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27a17641, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:34:52,949 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a7bef91497aa,45233,-1 for getting cluster id 2024-11-11T04:34:52,950 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T04:34:52,951 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2b09363f-62e0-4558-a43c-748f9c7b97a7' 2024-11-11T04:34:52,952 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T04:34:52,952 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2b09363f-62e0-4558-a43c-748f9c7b97a7" 2024-11-11T04:34:52,952 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@640579f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:34:52,952 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a7bef91497aa,45233,-1] 2024-11-11T04:34:52,952 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T04:34:52,953 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:34:52,954 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38104, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T04:34:52,955 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bd6b006, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:34:52,955 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T04:34:52,956 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7bef91497aa,36711,1731299691635, seqNum=-1] 2024-11-11T04:34:52,956 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:34:52,958 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46550, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:34:52,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a7bef91497aa,45233,1731299691590 2024-11-11T04:34:52,959 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:34:52,962 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T04:34:52,962 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-11T04:34:52,962 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-11T04:34:52,962 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T04:34:52,963 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is a7bef91497aa,45233,1731299691590 2024-11-11T04:34:52,963 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4c2a9068 2024-11-11T04:34:52,963 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T04:34:52,964 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38108, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T04:34:52,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45233 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T04:34:52,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45233 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
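[editor's note] The two TableDescriptorChecker warnings above report deliberately tiny values for "hbase.hregion.max.filesize" (786432) and "hbase.hregion.memstore.flush.size" (8192), which makes flushes and WAL rolls happen quickly during the test. Those values can be set either on the table descriptor or in the client Configuration; the following is only a hedged sketch of the Configuration route, not the code this test actually runs:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallRegionConfigSketch {
        // Returns a Configuration with the same tiny sizes the checker warns about.
        public static Configuration smallRegionConf() {
            Configuration conf = HBaseConfiguration.create();
            // Values copied from the warnings above; far too small for production,
            // but they force frequent memstore flushes and store file splits.
            conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB max store file size
            conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB memstore flush threshold
            return conf;
        }
    }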
2024-11-11T04:34:52,965 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45233 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T04:34:52,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45233 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-11T04:34:52,968 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T04:34:52,968 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:52,968 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45233 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-11T04:34:52,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45233 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T04:34:52,969 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T04:34:52,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741835_1011 (size=395) 2024-11-11T04:34:52,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44729 is added to blk_1073741835_1011 (size=395) 2024-11-11T04:34:52,978 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b64134db5f9e04e68fa2887624d07f22, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94 2024-11-11T04:34:52,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741836_1012 (size=78) 2024-11-11T04:34:52,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44729 is added to blk_1073741836_1012 (size=78) 2024-11-11T04:34:52,985 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:34:52,985 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing b64134db5f9e04e68fa2887624d07f22, disabling compactions & flushes 2024-11-11T04:34:52,985 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22. 2024-11-11T04:34:52,985 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22. 2024-11-11T04:34:52,985 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22. after waiting 0 ms 2024-11-11T04:34:52,985 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22. 2024-11-11T04:34:52,985 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22. 2024-11-11T04:34:52,985 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for b64134db5f9e04e68fa2887624d07f22: Waiting for close lock at 1731299692985Disabling compacts and flushes for region at 1731299692985Disabling writes for close at 1731299692985Writing region close event to WAL at 1731299692985Closed at 1731299692985 2024-11-11T04:34:52,986 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T04:34:52,987 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731299692987"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731299692987"}]},"ts":"1731299692987"} 2024-11-11T04:34:52,989 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
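[editor's note] The create request logged above (table 'TestLogRolling-testLogRollOnPipelineRestart', one family 'info' with VERSIONS => '1' and defaults elsewhere) corresponds roughly to the Admin API call sketched below. This is an illustrative client-side equivalent under those assumptions, not the test's own code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableName table = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // One column family 'info' keeping a single version, matching the descriptor in the log.
                admin.createTable(TableDescriptorBuilder.newBuilder(table)
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                        .setMaxVersions(1)
                        .build())
                    .build());
                // createTable() blocks until the master-side CreateTableProcedure completes;
                // the "Checking to see if procedure is done pid=4" lines appear to be the
                // server side of that completion polling.
                System.out.println("table available: " + admin.isTableAvailable(table));
            }
        }
    }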
2024-11-11T04:34:52,990 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T04:34:52,991 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731299692990"}]},"ts":"1731299692990"} 2024-11-11T04:34:52,992 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-11T04:34:52,993 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=b64134db5f9e04e68fa2887624d07f22, ASSIGN}] 2024-11-11T04:34:52,994 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=b64134db5f9e04e68fa2887624d07f22, ASSIGN 2024-11-11T04:34:52,995 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=b64134db5f9e04e68fa2887624d07f22, ASSIGN; state=OFFLINE, location=a7bef91497aa,36711,1731299691635; forceNewPlan=false, retain=false 2024-11-11T04:34:53,146 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b64134db5f9e04e68fa2887624d07f22, regionState=OPENING, regionLocation=a7bef91497aa,36711,1731299691635 2024-11-11T04:34:53,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=b64134db5f9e04e68fa2887624d07f22, ASSIGN because future has completed 2024-11-11T04:34:53,149 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b64134db5f9e04e68fa2887624d07f22, server=a7bef91497aa,36711,1731299691635}] 2024-11-11T04:34:53,306 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22. 
2024-11-11T04:34:53,306 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b64134db5f9e04e68fa2887624d07f22, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:34:53,306 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart b64134db5f9e04e68fa2887624d07f22 2024-11-11T04:34:53,307 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:34:53,307 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b64134db5f9e04e68fa2887624d07f22 2024-11-11T04:34:53,307 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b64134db5f9e04e68fa2887624d07f22 2024-11-11T04:34:53,308 INFO [StoreOpener-b64134db5f9e04e68fa2887624d07f22-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b64134db5f9e04e68fa2887624d07f22 2024-11-11T04:34:53,309 INFO [StoreOpener-b64134db5f9e04e68fa2887624d07f22-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b64134db5f9e04e68fa2887624d07f22 columnFamilyName info 2024-11-11T04:34:53,309 DEBUG [StoreOpener-b64134db5f9e04e68fa2887624d07f22-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:34:53,310 INFO [StoreOpener-b64134db5f9e04e68fa2887624d07f22-1 {}] regionserver.HStore(327): Store=b64134db5f9e04e68fa2887624d07f22/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:34:53,310 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b64134db5f9e04e68fa2887624d07f22 2024-11-11T04:34:53,310 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/default/TestLogRolling-testLogRollOnPipelineRestart/b64134db5f9e04e68fa2887624d07f22 2024-11-11T04:34:53,311 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/default/TestLogRolling-testLogRollOnPipelineRestart/b64134db5f9e04e68fa2887624d07f22 2024-11-11T04:34:53,311 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b64134db5f9e04e68fa2887624d07f22 2024-11-11T04:34:53,311 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b64134db5f9e04e68fa2887624d07f22 2024-11-11T04:34:53,312 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b64134db5f9e04e68fa2887624d07f22 2024-11-11T04:34:53,314 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/default/TestLogRolling-testLogRollOnPipelineRestart/b64134db5f9e04e68fa2887624d07f22/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:34:53,315 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b64134db5f9e04e68fa2887624d07f22; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=763933, jitterRate=-0.02860960364341736}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T04:34:53,315 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b64134db5f9e04e68fa2887624d07f22 2024-11-11T04:34:53,316 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b64134db5f9e04e68fa2887624d07f22: Running coprocessor pre-open hook at 1731299693307Writing region info on filesystem at 1731299693307Initializing all the Stores at 1731299693307Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299693307Cleaning up temporary data from old regions at 1731299693311 (+4 ms)Running coprocessor post-open hooks at 1731299693315 (+4 ms)Region opened successfully at 1731299693316 (+1 ms) 2024-11-11T04:34:53,317 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22., pid=6, masterSystemTime=1731299693302 2024-11-11T04:34:53,319 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22. 2024-11-11T04:34:53,319 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22. 2024-11-11T04:34:53,320 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b64134db5f9e04e68fa2887624d07f22, regionState=OPEN, openSeqNum=2, regionLocation=a7bef91497aa,36711,1731299691635 2024-11-11T04:34:53,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b64134db5f9e04e68fa2887624d07f22, server=a7bef91497aa,36711,1731299691635 because future has completed 2024-11-11T04:34:53,326 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T04:34:53,326 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b64134db5f9e04e68fa2887624d07f22, server=a7bef91497aa,36711,1731299691635 in 175 msec 2024-11-11T04:34:53,329 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T04:34:53,329 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=b64134db5f9e04e68fa2887624d07f22, ASSIGN in 333 msec 2024-11-11T04:34:53,330 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T04:34:53,330 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731299693330"}]},"ts":"1731299693330"} 2024-11-11T04:34:53,332 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-11T04:34:53,333 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T04:34:53,335 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 368 msec 2024-11-11T04:34:53,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:53,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:54,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:54,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:55,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:55,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:56,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:56,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:57,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:57,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:57,456 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T04:34:57,457 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-11T04:34:57,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-11T04:34:57,457 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-11T04:34:57,458 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T04:34:57,458 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-11T04:34:58,311 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T04:34:58,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:58,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:58,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:58,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:58,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:58,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:58,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:58,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:58,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:58,332 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:34:58,337 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T04:34:58,338 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-11T04:34:58,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:58,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:34:59,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:34:59,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:00,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:35:00,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:01,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:01,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:35:02,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:02,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:03,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45233 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T04:35:03,055 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-11T04:35:03,055 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-11T04:35:03,058 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-11T04:35:03,058 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22. 2024-11-11T04:35:03,061 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22., hostname=a7bef91497aa,36711,1731299691635, seqNum=2] 2024-11-11T04:35:03,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:03,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:04,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:04,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:05,064 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299692417 2024-11-11T04:35:05,064 WARN [ResponseProcessor for block BP-1384133828-172.17.0.2-1731299690940:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1384133828-172.17.0.2-1731299690940:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1384133828-172.17.0.2-1731299690940:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:44729,DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:05,064 WARN [ResponseProcessor for block BP-1384133828-172.17.0.2-1731299690940:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1384133828-172.17.0.2-1731299690940:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
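The repeated "Failed invocation" WARNs above come from RecoverLeaseFSUtils probing isFileClosed through reflection (the traces show RecoverLeaseFSUtils.isFileClosed calling Method.invoke); because the underlying DFSClient has already been closed, its IOException ("Filesystem closed") surfaces wrapped in an InvocationTargetException, which is exactly what each stack trace records. A minimal hypothetical sketch of such a probe follows; the class name, the return-false policy and the plain-FileSystem entry point are invented for illustration and this is not the HBase implementation.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper, not the RecoverLeaseFSUtils code: probe isFileClosed reflectively.
public final class IsFileClosedProbe {
  static boolean isFileClosed(FileSystem fs, Path wal) {
    try {
      // isFileClosed(Path) is exposed by DistributedFileSystem but not by the generic
      // FileSystem type, hence the reflective lookup on the runtime class.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, wal);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // this FileSystem implementation has no usable isFileClosed
    } catch (InvocationTargetException e) {
      // The reflected call threw. With an already-closed client the cause is
      // java.io.IOException: Filesystem closed; a caller would log it and treat the
      // file as not yet closed, which is the repeated WARN pattern seen above.
      return false;
    }
  }
}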
2024-11-11T04:35:05,064 WARN [ResponseProcessor for block BP-1384133828-172.17.0.2-1731299690940:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1384133828-172.17.0.2-1731299690940:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1384133828-172.17.0.2-1731299690940:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:44729,DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:05,065 WARN [DataStreamer for file /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/WALs/a7bef91497aa,45233,1731299691590/a7bef91497aa%2C45233%2C1731299691590.1731299692120 block BP-1384133828-172.17.0.2-1731299690940:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1384133828-172.17.0.2-1731299690940:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43511,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK], DatanodeInfoWithStorage[127.0.0.1:44729,DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44729,DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1,DISK]) is bad. 2024-11-11T04:35:05,065 WARN [DataStreamer for file /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299692417 block BP-1384133828-172.17.0.2-1731299690940:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1384133828-172.17.0.2-1731299690940:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43511,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK], DatanodeInfoWithStorage[127.0.0.1:44729,DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44729,DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1,DISK]) is bad. 2024-11-11T04:35:05,065 WARN [PacketResponder: BP-1384133828-172.17.0.2-1731299690940:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44729] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:35:05,065 WARN [DataStreamer for file /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.meta.1731299692796.meta block BP-1384133828-172.17.0.2-1731299690940:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1384133828-172.17.0.2-1731299690940:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44729,DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1,DISK], DatanodeInfoWithStorage[127.0.0.1:43511,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44729,DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1,DISK]) is bad. 2024-11-11T04:35:05,065 WARN [PacketResponder: BP-1384133828-172.17.0.2-1731299690940:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44729] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:35:05,065 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_285959890_22 at /127.0.0.1:54110 [Receiving block BP-1384133828-172.17.0.2-1731299690940:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43511:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54110 dst: /127.0.0.1:43511 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:35:05,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_784603438_22 at /127.0.0.1:54126 [Receiving block BP-1384133828-172.17.0.2-1731299690940:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43511:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54126 dst: /127.0.0.1:43511 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T04:35:05,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_784603438_22 at /127.0.0.1:35724 [Receiving block BP-1384133828-172.17.0.2-1731299690940:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35724 dst: /127.0.0.1:44729 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:35:05,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_285959890_22 at /127.0.0.1:35712 [Receiving block BP-1384133828-172.17.0.2-1731299690940:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35712 dst: /127.0.0.1:44729 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:35:05,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_784603438_22 at /127.0.0.1:35734 [Receiving block BP-1384133828-172.17.0.2-1731299690940:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35734 dst: /127.0.0.1:44729 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:35:05,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_784603438_22 at /127.0.0.1:54154 [Receiving block BP-1384133828-172.17.0.2-1731299690940:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43511:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54154 dst: /127.0.0.1:43511 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
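The "Premature EOF from inputStream" DataXceiver errors above are the receiving datanodes noticing that the writer's socket went away in the middle of a packet: the traces show Hadoop's IOUtils.readFully reporting an end-of-stream before the requested length with exactly that message. The toy program below (invented class name; it only assumes hadoop-common on the classpath) reproduces the message by handing readFully fewer bytes than it asks for.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import org.apache.hadoop.io.IOUtils;

// Standalone sketch: a frame reader that expects 8 bytes but whose peer vanished
// after 4 sees the same "Premature EOF from inputStream" logged by the DataXceivers.
public class PrematureEofDemo {
  public static void main(String[] args) {
    byte[] received = new byte[4]; // bytes that actually arrived before the peer closed
    byte[] frame = new byte[8];    // the full packet the receiver is still waiting for
    try {
      IOUtils.readFully(new ByteArrayInputStream(received), frame, 0, frame.length);
    } catch (IOException e) {
      System.out.println(e.getMessage()); // prints: Premature EOF from inputStream
    }
  }
}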
2024-11-11T04:35:05,068 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30a928dc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:35:05,068 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@341f9f9e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:35:05,068 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:35:05,069 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@365c2477{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:35:05,069 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c6cb23c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.log.dir/,STOPPED} 2024-11-11T04:35:05,070 WARN [BP-1384133828-172.17.0.2-1731299690940 heartbeating to localhost/127.0.0.1:45989 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:35:05,070 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T04:35:05,070 WARN [BP-1384133828-172.17.0.2-1731299690940 heartbeating to localhost/127.0.0.1:45989 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1384133828-172.17.0.2-1731299690940 (Datanode Uuid 63425e5e-0ac8-4381-82ea-5b5425f0f552) service to localhost/127.0.0.1:45989 2024-11-11T04:35:05,070 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:35:05,071 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/data/data3/current/BP-1384133828-172.17.0.2-1731299690940 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:35:05,071 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/data/data4/current/BP-1384133828-172.17.0.2-1731299690940 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:35:05,071 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:35:05,081 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:35:05,084 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:35:05,085 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:35:05,085 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:35:05,085 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T04:35:05,086 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67a2c057{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:35:05,086 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371cd9e1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:35:05,196 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@269fb75c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/java.io.tmpdir/jetty-localhost-34791-hadoop-hdfs-3_4_1-tests_jar-_-any-4538640456504371869/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:35:05,197 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@23080db5{HTTP/1.1, (http/1.1)}{localhost:34791} 2024-11-11T04:35:05,197 INFO [Time-limited test {}] server.Server(415): Started @162999ms 2024-11-11T04:35:05,198 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:35:05,216 WARN [ResponseProcessor for block BP-1384133828-172.17.0.2-1731299690940:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1384133828-172.17.0.2-1731299690940:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:05,216 WARN [ResponseProcessor for block BP-1384133828-172.17.0.2-1731299690940:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1384133828-172.17.0.2-1731299690940:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:05,216 WARN [ResponseProcessor for block BP-1384133828-172.17.0.2-1731299690940:blk_1073741833_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1384133828-172.17.0.2-1731299690940:blk_1073741833_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:05,216 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_784603438_22 at /127.0.0.1:52378 [Receiving block BP-1384133828-172.17.0.2-1731299690940:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43511:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52378 dst: /127.0.0.1:43511 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
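The stopped Jetty datanode contexts at 04:35:05,068 and the fresh ServerConnector started at 04:35:05,197 show a datanode being torn down and brought back while WAL blocks are still open, which is the pipeline restart that TestLogRolling#testLogRollOnPipelineRestart exercises and what produces the "Error Recovery for ... datanode ... is bad" WARNs. As a rough sketch only, not the TestLogRolling source, bouncing a node in a MiniDFSCluster looks roughly like the following; the class name is invented, the writer step is elided, and the exact MiniDFSCluster signatures are assumed from memory and may differ between Hadoop versions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical sketch of the scenario the surrounding records capture: restart one
// datanode under a MiniDFSCluster so open write pipelines hit errors and recover.
public class PipelineRestartSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      cluster.waitActive();
      // ... a writer (for example a WAL) would open a file and write a first batch here ...
      MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0); // pipeline now has a bad node
      cluster.restartDataNode(dn); // bring the same datanode back
      cluster.waitActive();
      // ... the next write observes the pipeline failure and triggers recovery or a log roll ...
    } finally {
      cluster.shutdown();
    }
  }
}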
2024-11-11T04:35:05,216 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_784603438_22 at /127.0.0.1:52392 [Receiving block BP-1384133828-172.17.0.2-1731299690940:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43511:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52392 dst: /127.0.0.1:43511 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:35:05,216 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_285959890_22 at /127.0.0.1:52394 [Receiving block BP-1384133828-172.17.0.2-1731299690940:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43511:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52394 dst: /127.0.0.1:43511 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:35:05,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7096145a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:35:05,221 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13bf27af{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:35:05,221 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:35:05,222 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@168478e8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:35:05,222 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78e57521{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.log.dir/,STOPPED} 2024-11-11T04:35:05,223 WARN [BP-1384133828-172.17.0.2-1731299690940 heartbeating to localhost/127.0.0.1:45989 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:35:05,223 WARN [BP-1384133828-172.17.0.2-1731299690940 heartbeating to localhost/127.0.0.1:45989 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1384133828-172.17.0.2-1731299690940 (Datanode Uuid 5bdc0d2d-5e97-4e14-9cd6-3d3c6eabc44e) service to localhost/127.0.0.1:45989 2024-11-11T04:35:05,223 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T04:35:05,223 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:35:05,224 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/data/data1/current/BP-1384133828-172.17.0.2-1731299690940 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:35:05,224 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/data/data2/current/BP-1384133828-172.17.0.2-1731299690940 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:35:05,224 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:35:05,231 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:35:05,234 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:35:05,268 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:35:05,268 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:35:05,268 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:35:05,269 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27265229{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:35:05,269 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@156f820b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:35:05,322 WARN [Thread-1347 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:35:05,325 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7ccad88f6a8d1da with lease ID 0x479abea76b6abe: from storage DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1 node DatanodeRegistration(127.0.0.1:41479, datanodeUuid=63425e5e-0ac8-4381-82ea-5b5425f0f552, infoPort=40159, infoSecurePort=0, ipcPort=33451, storageInfo=lv=-57;cid=testClusterID;nsid=1713708580;c=1731299690940), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T04:35:05,325 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7ccad88f6a8d1da with lease ID 0x479abea76b6abe: from storage DS-60b61927-4167-482c-8710-30f94aae55cf node DatanodeRegistration(127.0.0.1:41479, datanodeUuid=63425e5e-0ac8-4381-82ea-5b5425f0f552, infoPort=40159, infoSecurePort=0, ipcPort=33451, storageInfo=lv=-57;cid=testClusterID;nsid=1713708580;c=1731299690940), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:35:05,381 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@545fd1d9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/java.io.tmpdir/jetty-localhost-38417-hadoop-hdfs-3_4_1-tests_jar-_-any-5185439697003126826/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:35:05,381 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@61726e31{HTTP/1.1, (http/1.1)}{localhost:38417} 2024-11-11T04:35:05,381 INFO [Time-limited test {}] server.Server(415): Started @163183ms 2024-11-11T04:35:05,383 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:35:05,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:05,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:05,485 WARN [Thread-1378 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:35:05,488 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x310348a25941ecc2 with lease ID 0x479abea76b6abf: from storage DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb node DatanodeRegistration(127.0.0.1:41277, datanodeUuid=5bdc0d2d-5e97-4e14-9cd6-3d3c6eabc44e, infoPort=41513, infoSecurePort=0, ipcPort=37213, storageInfo=lv=-57;cid=testClusterID;nsid=1713708580;c=1731299690940), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:35:05,488 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x310348a25941ecc2 with lease ID 0x479abea76b6abf: from storage DS-a09f1d5d-f986-4c2b-b5fc-48c34034172a node DatanodeRegistration(127.0.0.1:41277, datanodeUuid=5bdc0d2d-5e97-4e14-9cd6-3d3c6eabc44e, infoPort=41513, infoSecurePort=0, ipcPort=37213, storageInfo=lv=-57;cid=testClusterID;nsid=1713708580;c=1731299690940), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:35:06,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:35:06,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:06,400 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-11T04:35:06,403 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-11T04:35:06,404 ERROR [FSHLog-0-hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94-prefix:a7bef91497aa,36711,1731299691635 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43511,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T04:35:06,404 WARN [FSHLog-0-hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94-prefix:a7bef91497aa,36711,1731299691635 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43511,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:06,404 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7bef91497aa%2C36711%2C1731299691635:(num 1731299692417) roll requested 2024-11-11T04:35:06,405 INFO [regionserver/a7bef91497aa:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C36711%2C1731299691635.1731299706404 2024-11-11T04:35:06,410 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299692417 newFile=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299706404 2024-11-11T04:35:06,410 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:06,411 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:06,411 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:06,411 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:06,411 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:06,411 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299692417 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299706404 2024-11-11T04:35:06,411 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43511,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T04:35:06,412 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43511,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:06,412 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299692417 2024-11-11T04:35:06,412 WARN [IPC Server handler 2 on default port 45989 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299692417 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1013 2024-11-11T04:35:06,412 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299692417 after 0ms 2024-11-11T04:35:06,416 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40159:40159),(127.0.0.1/127.0.0.1:41513:41513)] 2024-11-11T04:35:06,416 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299692417 is not closed yet, will try archiving it next time 2024-11-11T04:35:06,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41277 is added to blk_1073741833_1017 (size=1632) 2024-11-11T04:35:07,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:07,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:08,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:08,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:08,421 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-11T04:35:09,324 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1013: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-11T04:35:09,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:09,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:10,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:35:10,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:10,413 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299692417 after 4001ms 2024-11-11T04:35:10,424 WARN [ResponseProcessor for block BP-1384133828-172.17.0.2-1731299690940:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1384133828-172.17.0.2-1731299690940:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1384133828-172.17.0.2-1731299690940:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:41277,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T04:35:10,424 WARN [DataStreamer for file /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299706404 block BP-1384133828-172.17.0.2-1731299690940:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1384133828-172.17.0.2-1731299690940:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41479,DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1,DISK], DatanodeInfoWithStorage[127.0.0.1:41277,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41277,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK]) is bad. 2024-11-11T04:35:10,424 WARN [PacketResponder: BP-1384133828-172.17.0.2-1731299690940:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41277] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:35:10,424 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_784603438_22 at /127.0.0.1:44704 [Receiving block BP-1384133828-172.17.0.2-1731299690940:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44704 dst: /127.0.0.1:41479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:35:10,425 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_784603438_22 at /127.0.0.1:38988 [Receiving block BP-1384133828-172.17.0.2-1731299690940:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41277:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38988 dst: /127.0.0.1:41277 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T04:35:10,426 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@545fd1d9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:35:10,426 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@61726e31{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:35:10,426 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:35:10,426 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@156f820b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:35:10,427 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27265229{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.log.dir/,STOPPED} 2024-11-11T04:35:10,428 WARN [BP-1384133828-172.17.0.2-1731299690940 heartbeating to localhost/127.0.0.1:45989 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:35:10,428 WARN [BP-1384133828-172.17.0.2-1731299690940 heartbeating to localhost/127.0.0.1:45989 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1384133828-172.17.0.2-1731299690940 (Datanode Uuid 5bdc0d2d-5e97-4e14-9cd6-3d3c6eabc44e) service to localhost/127.0.0.1:45989 2024-11-11T04:35:10,428 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T04:35:10,428 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:35:10,429 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/data/data1/current/BP-1384133828-172.17.0.2-1731299690940 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:35:10,430 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/data/data2/current/BP-1384133828-172.17.0.2-1731299690940 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:35:10,430 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:35:10,438 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:35:10,441 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:35:10,444 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:35:10,444 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:35:10,444 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:35:10,445 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c83d523{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:35:10,445 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a397072{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:35:10,555 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1223970{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/java.io.tmpdir/jetty-localhost-37151-hadoop-hdfs-3_4_1-tests_jar-_-any-6674874695369984559/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:35:10,555 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@282cfbc9{HTTP/1.1, (http/1.1)}{localhost:37151} 2024-11-11T04:35:10,555 INFO [Time-limited test {}] server.Server(415): Started @168357ms 2024-11-11T04:35:10,557 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:35:10,576 WARN [ResponseProcessor for block BP-1384133828-172.17.0.2-1731299690940:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1384133828-172.17.0.2-1731299690940:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:10,577 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_784603438_22 at /127.0.0.1:45760 [Receiving block BP-1384133828-172.17.0.2-1731299690940:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45760 dst: /127.0.0.1:41479 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:35:10,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@269fb75c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:35:10,582 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@23080db5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:35:10,582 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:35:10,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371cd9e1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:35:10,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67a2c057{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.log.dir/,STOPPED} 2024-11-11T04:35:10,583 WARN [BP-1384133828-172.17.0.2-1731299690940 heartbeating to localhost/127.0.0.1:45989 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:35:10,584 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T04:35:10,584 WARN [BP-1384133828-172.17.0.2-1731299690940 heartbeating to localhost/127.0.0.1:45989 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1384133828-172.17.0.2-1731299690940 (Datanode Uuid 63425e5e-0ac8-4381-82ea-5b5425f0f552) service to localhost/127.0.0.1:45989 2024-11-11T04:35:10,584 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:35:10,584 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/data/data3/current/BP-1384133828-172.17.0.2-1731299690940 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:35:10,584 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/data/data4/current/BP-1384133828-172.17.0.2-1731299690940 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:35:10,585 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:35:10,597 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:35:10,602 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:35:10,605 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:35:10,605 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:35:10,605 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T04:35:10,606 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42bf2aaa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:35:10,606 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1dad3af2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:35:10,641 WARN [Thread-1421 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:35:10,643 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7cd175ca90264592 with lease ID 0x479abea76b6ac0: from storage DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb node DatanodeRegistration(127.0.0.1:42279, datanodeUuid=5bdc0d2d-5e97-4e14-9cd6-3d3c6eabc44e, infoPort=46313, infoSecurePort=0, ipcPort=34841, storageInfo=lv=-57;cid=testClusterID;nsid=1713708580;c=1731299690940), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:35:10,643 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7cd175ca90264592 with lease ID 0x479abea76b6ac0: from storage DS-a09f1d5d-f986-4c2b-b5fc-48c34034172a node DatanodeRegistration(127.0.0.1:42279, datanodeUuid=5bdc0d2d-5e97-4e14-9cd6-3d3c6eabc44e, infoPort=46313, infoSecurePort=0, ipcPort=34841, storageInfo=lv=-57;cid=testClusterID;nsid=1713708580;c=1731299690940), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:35:10,718 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@474594d6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/java.io.tmpdir/jetty-localhost-46597-hadoop-hdfs-3_4_1-tests_jar-_-any-7285449852359099107/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:35:10,719 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3cea2bec{HTTP/1.1, (http/1.1)}{localhost:46597} 2024-11-11T04:35:10,719 INFO [Time-limited test {}] server.Server(415): Started @168521ms 2024-11-11T04:35:10,721 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:35:10,802 WARN [Thread-1452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:35:10,804 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa1136e27c7cc17f9 with lease ID 0x479abea76b6ac1: from storage DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1 node DatanodeRegistration(127.0.0.1:37773, datanodeUuid=63425e5e-0ac8-4381-82ea-5b5425f0f552, infoPort=40665, infoSecurePort=0, ipcPort=37665, storageInfo=lv=-57;cid=testClusterID;nsid=1713708580;c=1731299690940), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:35:10,804 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa1136e27c7cc17f9 with lease ID 0x479abea76b6ac1: from storage DS-60b61927-4167-482c-8710-30f94aae55cf node DatanodeRegistration(127.0.0.1:37773, datanodeUuid=63425e5e-0ac8-4381-82ea-5b5425f0f552, infoPort=40665, infoSecurePort=0, ipcPort=37665, storageInfo=lv=-57;cid=testClusterID;nsid=1713708580;c=1731299690940), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:35:11,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:35:11,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:11,738 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-11T04:35:11,740 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-11T04:35:11,742 ERROR [FSHLog-0-hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94-prefix:a7bef91497aa,36711,1731299691635 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41479,DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T04:35:11,742 WARN [FSHLog-0-hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94-prefix:a7bef91497aa,36711,1731299691635 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41479,DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:11,742 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7bef91497aa%2C36711%2C1731299691635:(num 1731299706404) roll requested 2024-11-11T04:35:11,742 INFO [regionserver/a7bef91497aa:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C36711%2C1731299691635.1731299711742 2024-11-11T04:35:11,747 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299706404 newFile=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299711742 2024-11-11T04:35:11,747 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:11,747 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:11,748 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:11,748 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:11,748 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:11,748 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299706404 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299711742 2024-11-11T04:35:11,748 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41479,DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T04:35:11,748 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41479,DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:11,748 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299706404 2024-11-11T04:35:11,749 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46313:46313),(127.0.0.1/127.0.0.1:40665:40665)] 2024-11-11T04:35:11,749 WARN [IPC Server handler 1 on default port 45989 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299706404 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-11T04:35:11,749 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299706404 is not closed yet, will try archiving it next time 2024-11-11T04:35:11,749 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299706404 after 1ms 2024-11-11T04:35:11,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37773 is added to blk_1073741837_1020 (size=2427) 2024-11-11T04:35:12,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:12,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:13,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:13,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:13,750 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C36711%2C1731299691635.1731299713750 2024-11-11T04:35:13,756 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299711742 newFile=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299713750 2024-11-11T04:35:13,756 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:13,756 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:13,756 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:13,756 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:13,756 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:13,757 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299711742 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299713750 2024-11-11T04:35:13,757 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40665:40665),(127.0.0.1/127.0.0.1:46313:46313)] 2024-11-11T04:35:13,757 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299706404 is not closed yet, will try archiving it next time 2024-11-11T04:35:13,757 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299711742 is not closed yet, will try archiving it next time 2024-11-11T04:35:13,758 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299692417 2024-11-11T04:35:13,758 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299692417 2024-11-11T04:35:13,758 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299692417 after 0ms 2024-11-11T04:35:13,758 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299692417 2024-11-11T04:35:13,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37773 is added to blk_1073741838_1019 (size=1264) 2024-11-11T04:35:13,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42279 is added to blk_1073741838_1019 (size=1264) 2024-11-11T04:35:13,759 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299706404 is not closed yet, will try archiving it next time 2024-11-11T04:35:13,770 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731299693316/Put/vlen=218/seqid=0] 2024-11-11T04:35:13,771 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731299703062/Put/vlen=1045/seqid=0] 2024-11-11T04:35:13,771 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299692417 2024-11-11T04:35:13,771 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299706404 2024-11-11T04:35:13,771 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299706404 2024-11-11T04:35:13,771 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299706404 after 0ms 2024-11-11T04:35:13,771 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299706404 2024-11-11T04:35:13,774 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731299706404/Put/vlen=1045/seqid=0] 2024-11-11T04:35:13,775 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731299708422/Put/vlen=1045/seqid=0] 2024-11-11T04:35:13,775 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299706404 2024-11-11T04:35:13,775 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299711742 2024-11-11T04:35:13,775 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299711742 2024-11-11T04:35:13,775 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299711742 after 0ms 2024-11-11T04:35:13,775 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299711742 2024-11-11T04:35:13,778 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731299711741/Put/vlen=1045/seqid=0] 2024-11-11T04:35:13,778 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299713750 2024-11-11T04:35:13,778 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299713750 2024-11-11T04:35:13,779 WARN [IPC Server handler 4 on default port 45989 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299713750 has not been closed. Lease recovery is in progress. 
RecoveryId = 1022 for block blk_1073741839_1021 2024-11-11T04:35:13,779 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299713750 after 0ms 2024-11-11T04:35:14,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:14,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:14,648 WARN [ResponseProcessor for block BP-1384133828-172.17.0.2-1731299690940:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1384133828-172.17.0.2-1731299690940:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:14,648 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_285959890_22 at /127.0.0.1:54576 [Receiving block BP-1384133828-172.17.0.2-1731299690940:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:37773:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54576 dst: /127.0.0.1:37773 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:37773 remote=/127.0.0.1:54576]. Total timeout mills is 60000, 59108 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:35:14,648 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_285959890_22 at /127.0.0.1:47930 [Receiving block BP-1384133828-172.17.0.2-1731299690940:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42279:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47930 dst: /127.0.0.1:42279 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T04:35:14,648 WARN [DataStreamer for file /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299713750 block BP-1384133828-172.17.0.2-1731299690940:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1384133828-172.17.0.2-1731299690940:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37773,DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1,DISK], DatanodeInfoWithStorage[127.0.0.1:42279,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37773,DS-d80c624a-0f9d-47a0-bb5a-38dd126d81a1,DISK]) is bad. 2024-11-11T04:35:14,649 WARN [DataStreamer for file /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299713750 block BP-1384133828-172.17.0.2-1731299690940:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1384133828-172.17.0.2-1731299690940:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:14,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37773 is added to blk_1073741839_1022 (size=85) 2024-11-11T04:35:15,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:15,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:15,644 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
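The repeated "Failed invocation ... java.io.IOException: Filesystem closed" warnings above come from RecoverLeaseFSUtils polling the NameNode after the test's DFSClient has already been closed. As a minimal, hypothetical sketch of the underlying recoverLease/isFileClosed pattern (not the HBase implementation itself), the same loop can be driven directly against DistributedFileSystem; the namenode URI and WAL path below are placeholders modelled on the paths in this log.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder namenode address and WAL path for illustration only.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41327"), conf);
        Path wal = new Path("/user/jenkins/test-data/EXAMPLE/WALs/example-wal");
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // Ask the NameNode to start lease recovery; returns true once the file is closed.
          boolean recovered = dfs.recoverLease(wal);
          while (!recovered) {
            Thread.sleep(1000L);
            // isFileClosed() is the call RecoverLeaseFSUtils reaches via reflection; it is the
            // call that fails with "Filesystem closed" above once the DFSClient has been shut down.
            if (dfs.isFileClosed(wal)) {
              break;
            }
            recovered = dfs.recoverLease(wal);
          }
        }
        fs.close();
      }
    }

In the run above every attempt logs the same InvocationTargetException because the wrapped cause is "Filesystem closed", i.e. the client side of the loop is gone while the retry machinery keeps invoking it.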
2024-11-11T04:35:15,750 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299706404 after 4002ms 2024-11-11T04:35:16,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:16,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:17,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:17,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:35:17,780 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299713750 after 4001ms 2024-11-11T04:35:17,780 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299713750 2024-11-11T04:35:17,783 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299713750 2024-11-11T04:35:17,784 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing b64134db5f9e04e68fa2887624d07f22 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-11T04:35:17,784 ERROR [FSHLog-0-hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94-prefix:a7bef91497aa,36711,1731299691635 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1384133828-172.17.0.2-1731299690940:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:17,785 WARN [FSHLog-0-hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94-prefix:a7bef91497aa,36711,1731299691635 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1384133828-172.17.0.2-1731299690940:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:17,785 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7bef91497aa%2C36711%2C1731299691635:(num 1731299713750) roll requested 2024-11-11T04:35:17,785 INFO [regionserver/a7bef91497aa:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C36711%2C1731299691635.1731299717785 2024-11-11T04:35:17,790 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299713750 newFile=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299717785 2024-11-11T04:35:17,790 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:17,790 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:17,791 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:17,791 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:17,791 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:17,791 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299713750 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299717785 2024-11-11T04:35:17,791 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1384133828-172.17.0.2-1731299690940:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:17,791 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1384133828-172.17.0.2-1731299690940:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
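Both "Failed to write trailer, non-fatal, continuing..." and "close old writer failed." above carry an org.apache.hadoop.ipc.RemoteException, meaning the "Unexpected BlockUCState" error was raised inside the NameNode and shipped back over RPC rather than thrown locally. A hedged sketch of how a caller can tell the two cases apart (the AutoCloseable target here is a placeholder, not the WAL writer class):

    import java.io.IOException;
    import org.apache.hadoop.ipc.RemoteException;

    public class CloseFailureSketch {
      // Placeholder helper: close any writer-like resource and classify the failure.
      static void closeQuietly(AutoCloseable writer) {
        try {
          writer.close();
        } catch (Exception e) {
          if (e instanceof RemoteException) {
            RemoteException re = (RemoteException) e;
            // getClassName() names the exception class thrown on the NameNode side;
            // unwrapRemoteException() rebuilds it as a local IOException where possible.
            IOException serverSide = re.unwrapRemoteException();
            System.err.println("server-side failure (" + re.getClassName() + "): "
                + serverSide.getMessage());
          } else {
            System.err.println("local failure: " + e);
          }
        }
      }
    }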
2024-11-11T04:35:17,792 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299713750 2024-11-11T04:35:17,792 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46313:46313),(127.0.0.1/127.0.0.1:40665:40665)] 2024-11-11T04:35:17,792 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299713750 is not closed yet, will try archiving it next time 2024-11-11T04:35:17,792 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299713750 after 0ms 2024-11-11T04:35:17,793 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.1731299713750 to hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/oldWALs/a7bef91497aa%2C36711%2C1731299691635.1731299713750 2024-11-11T04:35:17,808 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/default/TestLogRolling-testLogRollOnPipelineRestart/b64134db5f9e04e68fa2887624d07f22/.tmp/info/8c17349ecb3d4a5c9ef83c8cd0828c9c is 1080, key is row1002/info:/1731299703062/Put/seqid=0 2024-11-11T04:35:17,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37773 is added to blk_1073741841_1024 (size=9270) 2024-11-11T04:35:17,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42279 is added to blk_1073741841_1024 (size=9270) 2024-11-11T04:35:17,813 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/default/TestLogRolling-testLogRollOnPipelineRestart/b64134db5f9e04e68fa2887624d07f22/.tmp/info/8c17349ecb3d4a5c9ef83c8cd0828c9c 2024-11-11T04:35:17,819 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/default/TestLogRolling-testLogRollOnPipelineRestart/b64134db5f9e04e68fa2887624d07f22/.tmp/info/8c17349ecb3d4a5c9ef83c8cd0828c9c as hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/default/TestLogRolling-testLogRollOnPipelineRestart/b64134db5f9e04e68fa2887624d07f22/info/8c17349ecb3d4a5c9ef83c8cd0828c9c 2024-11-11T04:35:17,823 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/default/TestLogRolling-testLogRollOnPipelineRestart/b64134db5f9e04e68fa2887624d07f22/info/8c17349ecb3d4a5c9ef83c8cd0828c9c, entries=4, sequenceid=8, filesize=9.1 K 2024-11-11T04:35:17,824 INFO [Time-limited test 
{}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for b64134db5f9e04e68fa2887624d07f22 in 40ms, sequenceid=8, compaction requested=false 2024-11-11T04:35:17,824 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for b64134db5f9e04e68fa2887624d07f22: 2024-11-11T04:35:17,825 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-11T04:35:17,825 ERROR [FSHLog-0-hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94-prefix:a7bef91497aa,36711,1731299691635.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43511,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:17,825 WARN [FSHLog-0-hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94-prefix:a7bef91497aa,36711,1731299691635.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43511,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
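The flush above is driven directly by the test against the region ("Flushing b64134db5f9e04e68fa2887624d07f22 ... Finished flush ... sequenceid=8"). Purely as an illustrative, client-level equivalent (not the code path this test uses), the same table can be flushed through the Admin API; the table name is the one appearing in the store-file paths above, and the configuration source is assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml for the target cluster is on the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Table name as it appears in the data/default/... paths in this log.
          admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"));
        }
      }
    }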
2024-11-11T04:35:17,825 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7bef91497aa%2C36711%2C1731299691635.meta:.meta(num 1731299692796) roll requested 2024-11-11T04:35:17,826 INFO [regionserver/a7bef91497aa:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C36711%2C1731299691635.meta.1731299717825.meta 2024-11-11T04:35:17,831 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:17,831 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:17,831 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:17,831 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:17,831 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:17,831 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.meta.1731299692796.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.meta.1731299717825.meta 2024-11-11T04:35:17,831 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43511,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:17,832 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43511,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
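The meta WAL roll above ("roll requested" followed by "Rolled WAL ... meta.1731299717825.meta") is initiated internally by the region server's log roller once appends start failing. For illustration only, a client can request the same roll via the Admin API; the server name string below is the server identity that appears in the WAL directory names in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalWriterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // "host,port,startcode" form, matching a7bef91497aa,36711,1731299691635 above.
          ServerName server = ServerName.valueOf("a7bef91497aa,36711,1731299691635");
          // Asks the region server to close its current WAL file and open a new one.
          admin.rollWALWriter(server);
        }
      }
    }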
2024-11-11T04:35:17,832 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.meta.1731299692796.meta 2024-11-11T04:35:17,832 WARN [IPC Server handler 4 on default port 45989 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.meta.1731299692796.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1014 2024-11-11T04:35:17,832 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.meta.1731299692796.meta after 0ms 2024-11-11T04:35:17,833 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40665:40665),(127.0.0.1/127.0.0.1:46313:46313)] 2024-11-11T04:35:17,833 DEBUG [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.meta.1731299692796.meta is not closed yet, will try archiving it next time 2024-11-11T04:35:17,849 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740/.tmp/info/0055c7fc8da141fbb716fec85d4a22c4 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22./info:regioninfo/1731299693320/Put/seqid=0 2024-11-11T04:35:17,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37773 is added to blk_1073741843_1027 (size=7125) 2024-11-11T04:35:17,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42279 is added to blk_1073741843_1027 (size=7125) 2024-11-11T04:35:17,855 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740/.tmp/info/0055c7fc8da141fbb716fec85d4a22c4 2024-11-11T04:35:17,874 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740/.tmp/ns/edb0c80062c241c093d3791ffe7e22fa is 43, key is default/ns:d/1731299692840/Put/seqid=0 2024-11-11T04:35:17,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37773 is added to blk_1073741844_1028 (size=5153) 2024-11-11T04:35:17,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42279 is added to blk_1073741844_1028 (size=5153) 2024-11-11T04:35:17,879 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740/.tmp/ns/edb0c80062c241c093d3791ffe7e22fa 2024-11-11T04:35:17,897 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740/.tmp/table/5354eb64b2bc495081e086cc196b44ae is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731299693330/Put/seqid=0 2024-11-11T04:35:17,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37773 is added to blk_1073741845_1029 (size=5438) 2024-11-11T04:35:17,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42279 is added to blk_1073741845_1029 (size=5438) 2024-11-11T04:35:17,902 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740/.tmp/table/5354eb64b2bc495081e086cc196b44ae 2024-11-11T04:35:17,907 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740/.tmp/info/0055c7fc8da141fbb716fec85d4a22c4 as hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740/info/0055c7fc8da141fbb716fec85d4a22c4 2024-11-11T04:35:17,912 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740/info/0055c7fc8da141fbb716fec85d4a22c4, entries=10, sequenceid=11, filesize=7.0 K 2024-11-11T04:35:17,913 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740/.tmp/ns/edb0c80062c241c093d3791ffe7e22fa as hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740/ns/edb0c80062c241c093d3791ffe7e22fa 2024-11-11T04:35:17,918 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740/ns/edb0c80062c241c093d3791ffe7e22fa, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T04:35:17,919 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740/.tmp/table/5354eb64b2bc495081e086cc196b44ae as hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740/table/5354eb64b2bc495081e086cc196b44ae 2024-11-11T04:35:17,925 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740/table/5354eb64b2bc495081e086cc196b44ae, entries=2, sequenceid=11, filesize=5.3 K 2024-11-11T04:35:17,926 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 101ms, sequenceid=11, compaction requested=false 2024-11-11T04:35:17,926 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-11T04:35:17,931 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T04:35:17,932 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T04:35:17,932 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:35:17,932 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:35:17,932 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:35:17,932 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T04:35:17,932 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T04:35:17,932 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=737493324, stopped=false 2024-11-11T04:35:17,932 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a7bef91497aa,45233,1731299691590 2024-11-11T04:35:17,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:35:17,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:35:17,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:35:17,934 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T04:35:17,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:35:17,934 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
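[Editor's note] The ZKWatcher entries above show both the master (45233) and the region server (36711) receiving NodeDeleted for /hbase/running, which is how cluster shutdown is broadcast. The sketch below watches a znode for deletion with the plain ZooKeeper client; the quorum address is a placeholder and this is not HBase's ZKWatcher, only an illustration of the same pattern.

import java.io.IOException;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Illustrative only: a bare ZooKeeper watch on a "running" znode.
public class RunningNodeWatch {
  public static void main(String[] args) throws IOException, KeeperException, InterruptedException {
    final String runningZNode = "/hbase/running";
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
      // session-level watcher; the per-path watcher is registered below
    });
    Watcher shutdownWatcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && runningZNode.equals(event.getPath())) {
        System.out.println("running znode deleted -> begin shutdown");
      }
    };
    // exists() registers the watcher whether or not the node is currently present.
    zk.exists(runningZNode, shutdownWatcher);
    Thread.sleep(60_000);   // keep the session alive long enough to observe the event
    zk.close();
  }
}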
2024-11-11T04:35:17,934 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:35:17,934 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:35:17,934 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'a7bef91497aa,36711,1731299691635' ***** 2024-11-11T04:35:17,934 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T04:35:17,935 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T04:35:17,935 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:35:17,935 INFO [RS:0;a7bef91497aa:36711 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T04:35:17,935 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T04:35:17,935 INFO [RS:0;a7bef91497aa:36711 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T04:35:17,935 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.HRegionServer(3091): Received CLOSE for b64134db5f9e04e68fa2887624d07f22 2024-11-11T04:35:17,935 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:35:17,935 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.HRegionServer(959): stopping server a7bef91497aa,36711,1731299691635 2024-11-11T04:35:17,935 INFO [RS:0;a7bef91497aa:36711 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T04:35:17,935 INFO [RS:0;a7bef91497aa:36711 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a7bef91497aa:36711. 2024-11-11T04:35:17,935 DEBUG [RS:0;a7bef91497aa:36711 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:35:17,936 DEBUG [RS:0;a7bef91497aa:36711 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:35:17,936 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b64134db5f9e04e68fa2887624d07f22, disabling compactions & flushes 2024-11-11T04:35:17,936 INFO [RS:0;a7bef91497aa:36711 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T04:35:17,936 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22. 2024-11-11T04:35:17,936 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T04:35:17,936 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T04:35:17,936 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22. 2024-11-11T04:35:17,936 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T04:35:17,936 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22. after waiting 0 ms 2024-11-11T04:35:17,936 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22. 2024-11-11T04:35:17,936 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T04:35:17,936 DEBUG [RS:0;a7bef91497aa:36711 {}] regionserver.HRegionServer(1325): Online Regions={b64134db5f9e04e68fa2887624d07f22=TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22., 1588230740=hbase:meta,,1.1588230740} 2024-11-11T04:35:17,936 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:35:17,936 DEBUG [RS:0;a7bef91497aa:36711 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, b64134db5f9e04e68fa2887624d07f22 2024-11-11T04:35:17,936 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T04:35:17,937 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T04:35:17,937 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:35:17,937 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:35:17,941 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/default/TestLogRolling-testLogRollOnPipelineRestart/b64134db5f9e04e68fa2887624d07f22/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-11T04:35:17,941 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T04:35:17,941 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22. 2024-11-11T04:35:17,942 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b64134db5f9e04e68fa2887624d07f22: Waiting for close lock at 1731299717935Running coprocessor pre-close hooks at 1731299717935Disabling compacts and flushes for region at 1731299717935Disabling writes for close at 1731299717936 (+1 ms)Writing region close event to WAL at 1731299717937 (+1 ms)Running coprocessor post-close hooks at 1731299717941 (+4 ms)Closed at 1731299717941 2024-11-11T04:35:17,942 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T04:35:17,942 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T04:35:17,942 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731299692965.b64134db5f9e04e68fa2887624d07f22. 2024-11-11T04:35:17,942 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731299717936Running coprocessor pre-close hooks at 1731299717936Disabling compacts and flushes for region at 1731299717936Disabling writes for close at 1731299717937 (+1 ms)Writing region close event to WAL at 1731299717938 (+1 ms)Running coprocessor post-close hooks at 1731299717941 (+3 ms)Closed at 1731299717942 (+1 ms) 2024-11-11T04:35:17,942 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T04:35:18,137 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.HRegionServer(976): stopping server a7bef91497aa,36711,1731299691635; all regions closed. 
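[Editor's note] The close path above records the highest flushed sequence id by writing an empty marker file named "<newMaxSeqId>.seqid" under each region's recovered.edits directory (11.seqid and 14.seqid here). The helpers below sketch that naming convention with plain Hadoop FileSystem calls; the layout is taken from the log, but the helper itself is illustrative and is not HBase's WALSplitUtil.

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative helpers for the "<seqid>.seqid" marker convention seen in the log.
public final class SeqIdMarkers {
  private SeqIdMarkers() {}

  /** Write an empty marker file recording the region's new max sequence id. */
  static void writeMaxSeqIdMarker(FileSystem fs, Path recoveredEditsDir, long maxSeqId)
      throws IOException {
    Path marker = new Path(recoveredEditsDir, maxSeqId + ".seqid");
    fs.create(marker, true).close();   // empty file: the name carries the value
  }

  /** Scan the directory and return the largest sequence id recorded by any marker. */
  static long readMaxSeqId(FileSystem fs, Path recoveredEditsDir) throws IOException {
    long max = -1L;
    for (FileStatus st : fs.listStatus(recoveredEditsDir)) {
      String name = st.getPath().getName();
      if (name.endsWith(".seqid")) {
        max = Math.max(max, Long.parseLong(name.substring(0, name.length() - ".seqid".length())));
      }
    }
    return max;
  }
}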
2024-11-11T04:35:18,137 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:18,137 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:18,137 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:18,138 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:18,138 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:18,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42279 is added to blk_1073741842_1025 (size=825) 2024-11-11T04:35:18,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37773 is added to blk_1073741842_1025 (size=825) 2024-11-11T04:35:18,284 INFO [regionserver/a7bef91497aa:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T04:35:18,285 INFO [regionserver/a7bef91497aa:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T04:35:18,286 INFO [regionserver/a7bef91497aa:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:35:18,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:35:18,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:19,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:19,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:35:20,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:20,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:21,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:35:21,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:21,574 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T04:35:21,804 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-11T04:35:21,833 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.meta.1731299692796.meta after 4001ms 2024-11-11T04:35:21,833 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/WALs/a7bef91497aa,36711,1731299691635/a7bef91497aa%2C36711%2C1731299691635.meta.1731299692796.meta to hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/oldWALs/a7bef91497aa%2C36711%2C1731299691635.meta.1731299692796.meta 2024-11-11T04:35:21,836 DEBUG [RS:0;a7bef91497aa:36711 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/oldWALs 2024-11-11T04:35:21,836 INFO [RS:0;a7bef91497aa:36711 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7bef91497aa%2C36711%2C1731299691635.meta:.meta(num 1731299717825) 2024-11-11T04:35:21,837 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:21,837 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:21,837 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:21,837 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:21,837 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:21,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37773 is added to blk_1073741840_1023 (size=1162) 2024-11-11T04:35:21,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42279 is added to blk_1073741840_1023 (size=1162) 2024-11-11T04:35:21,843 DEBUG [RS:0;a7bef91497aa:36711 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/oldWALs 2024-11-11T04:35:21,843 INFO [RS:0;a7bef91497aa:36711 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7bef91497aa%2C36711%2C1731299691635:(num 1731299717785) 2024-11-11T04:35:21,843 DEBUG [RS:0;a7bef91497aa:36711 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:35:21,843 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:35:21,843 INFO [RS:0;a7bef91497aa:36711 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T04:35:21,844 INFO [RS:0;a7bef91497aa:36711 {}] hbase.ChoreService(370): Chore service for: regionserver/a7bef91497aa:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T04:35:21,844 INFO [RS:0;a7bef91497aa:36711 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T04:35:21,844 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller 
exiting. 2024-11-11T04:35:21,844 INFO [RS:0;a7bef91497aa:36711 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36711 2024-11-11T04:35:21,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a7bef91497aa,36711,1731299691635 2024-11-11T04:35:21,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:35:21,846 INFO [RS:0;a7bef91497aa:36711 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T04:35:21,848 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a7bef91497aa,36711,1731299691635] 2024-11-11T04:35:21,851 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a7bef91497aa,36711,1731299691635 already deleted, retry=false 2024-11-11T04:35:21,851 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a7bef91497aa,36711,1731299691635 expired; onlineServers=0 2024-11-11T04:35:21,851 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a7bef91497aa,45233,1731299691590' ***** 2024-11-11T04:35:21,851 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T04:35:21,851 INFO [M:0;a7bef91497aa:45233 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T04:35:21,852 INFO [M:0;a7bef91497aa:45233 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T04:35:21,852 DEBUG [M:0;a7bef91497aa:45233 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T04:35:21,852 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
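[Editor's note] The RecoverLeaseFSUtils entries above show attempt=0 failing immediately ("Lease recovery is in progress") and the lease only recovered on attempt=1 about four seconds later, while the repeated isFileClosed probes on the other test directory fail because that DFSClient was already closed. Below is a minimal retry loop over the public DistributedFileSystem calls (recoverLease / isFileClosed); the WAL path and the retry pacing are placeholders, and this is a sketch of the pattern, not the HBase utility.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative lease-recovery loop over the public DistributedFileSystem API.
public class LeaseRecoveryLoop {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return;                                      // lease recovery only applies to HDFS
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    Path wal = new Path("/tmp/old-wal-to-recover"); // placeholder path
    long start = System.currentTimeMillis();
    for (int attempt = 0; ; attempt++) {
      // recoverLease() returns true once the file is closed and its lease released.
      if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
        System.out.println("Recovered lease, attempt=" + attempt
            + " after " + (System.currentTimeMillis() - start) + "ms");
        break;
      }
      System.out.println("Failed to recover lease, attempt=" + attempt + "; retrying");
      Thread.sleep(1000);                          // NameNode-side recovery needs a moment
    }
  }
}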
2024-11-11T04:35:21,852 DEBUG [M:0;a7bef91497aa:45233 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T04:35:21,852 DEBUG [master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299692191 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299692191,5,FailOnTimeoutGroup] 2024-11-11T04:35:21,852 DEBUG [master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299692191 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299692191,5,FailOnTimeoutGroup] 2024-11-11T04:35:21,852 INFO [M:0;a7bef91497aa:45233 {}] hbase.ChoreService(370): Chore service for: master/a7bef91497aa:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T04:35:21,852 INFO [M:0;a7bef91497aa:45233 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T04:35:21,852 DEBUG [M:0;a7bef91497aa:45233 {}] master.HMaster(1795): Stopping service threads 2024-11-11T04:35:21,852 INFO [M:0;a7bef91497aa:45233 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T04:35:21,852 INFO [M:0;a7bef91497aa:45233 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T04:35:21,853 INFO [M:0;a7bef91497aa:45233 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T04:35:21,853 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T04:35:21,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T04:35:21,854 DEBUG [M:0;a7bef91497aa:45233 {}] zookeeper.ZKUtil(347): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T04:35:21,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:35:21,854 WARN [M:0;a7bef91497aa:45233 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T04:35:21,854 INFO [M:0;a7bef91497aa:45233 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/.lastflushedseqids 2024-11-11T04:35:21,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37773 is added to blk_1073741846_1030 (size=111) 2024-11-11T04:35:21,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42279 is added to blk_1073741846_1030 (size=111) 2024-11-11T04:35:21,860 INFO [M:0;a7bef91497aa:45233 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T04:35:21,860 INFO [M:0;a7bef91497aa:45233 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T04:35:21,861 DEBUG [M:0;a7bef91497aa:45233 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T04:35:21,861 INFO [M:0;a7bef91497aa:45233 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:35:21,861 DEBUG [M:0;a7bef91497aa:45233 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:35:21,861 DEBUG [M:0;a7bef91497aa:45233 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T04:35:21,861 DEBUG [M:0;a7bef91497aa:45233 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:35:21,861 INFO [M:0;a7bef91497aa:45233 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-11T04:35:21,861 ERROR [FSHLog-0-hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData-prefix:a7bef91497aa,45233,1731299691590 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43511,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:21,861 WARN [FSHLog-0-hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData-prefix:a7bef91497aa,45233,1731299691590 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43511,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T04:35:21,861 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog a7bef91497aa%2C45233%2C1731299691590:(num 1731299692120) roll requested 2024-11-11T04:35:21,862 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C45233%2C1731299691590.1731299721862 2024-11-11T04:35:21,866 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:21,866 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:21,866 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:21,867 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:21,867 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:21,867 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/WALs/a7bef91497aa,45233,1731299691590/a7bef91497aa%2C45233%2C1731299691590.1731299692120 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/WALs/a7bef91497aa,45233,1731299691590/a7bef91497aa%2C45233%2C1731299691590.1731299721862 2024-11-11T04:35:21,867 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43511,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T04:35:21,867 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43511,DS-6566c4a9-3245-49c8-bf5f-7b1a9d93becb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T04:35:21,867 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/WALs/a7bef91497aa,45233,1731299691590/a7bef91497aa%2C45233%2C1731299691590.1731299692120 2024-11-11T04:35:21,868 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46313:46313),(127.0.0.1/127.0.0.1:40665:40665)] 2024-11-11T04:35:21,868 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/WALs/a7bef91497aa,45233,1731299691590/a7bef91497aa%2C45233%2C1731299691590.1731299692120 is not closed yet, will try archiving it next time 2024-11-11T04:35:21,868 WARN [IPC Server handler 0 on default port 45989 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/WALs/a7bef91497aa,45233,1731299691590/a7bef91497aa%2C45233%2C1731299691590.1731299692120 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-11T04:35:21,868 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/WALs/a7bef91497aa,45233,1731299691590/a7bef91497aa%2C45233%2C1731299691590.1731299692120 after 1ms 2024-11-11T04:35:21,882 DEBUG [M:0;a7bef91497aa:45233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9dd2f5ee937f4d77ac6ab4e1db603029 is 82, key is hbase:meta,,1/info:regioninfo/1731299692825/Put/seqid=0 2024-11-11T04:35:21,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37773 is added to blk_1073741848_1033 (size=5672) 2024-11-11T04:35:21,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42279 is added to blk_1073741848_1033 (size=5672) 2024-11-11T04:35:21,888 INFO [M:0;a7bef91497aa:45233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9dd2f5ee937f4d77ac6ab4e1db603029 2024-11-11T04:35:21,907 DEBUG [M:0;a7bef91497aa:45233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/db3a5d8d991441639d10424291df3cfa is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731299693335/Put/seqid=0 2024-11-11T04:35:21,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37773 is added to blk_1073741849_1034 (size=6118) 2024-11-11T04:35:21,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42279 is added to blk_1073741849_1034 (size=6118) 2024-11-11T04:35:21,913 INFO [M:0;a7bef91497aa:45233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/db3a5d8d991441639d10424291df3cfa 2024-11-11T04:35:21,931 DEBUG [M:0;a7bef91497aa:45233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/33888751c51c4f96a4d3757fa4ec1362 is 69, key is a7bef91497aa,36711,1731299691635/rs:state/1731299692270/Put/seqid=0 2024-11-11T04:35:21,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42279 is added to blk_1073741850_1035 (size=5156) 2024-11-11T04:35:21,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37773 is added to blk_1073741850_1035 (size=5156) 2024-11-11T04:35:21,937 INFO [M:0;a7bef91497aa:45233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/33888751c51c4f96a4d3757fa4ec1362 2024-11-11T04:35:21,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:35:21,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36711-0x101959c41a00001, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:35:21,949 INFO [RS:0;a7bef91497aa:36711 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T04:35:21,949 INFO [RS:0;a7bef91497aa:36711 {}] regionserver.HRegionServer(1031): Exiting; stopping=a7bef91497aa,36711,1731299691635; zookeeper connection closed. 
2024-11-11T04:35:21,949 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1c6787f6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1c6787f6 2024-11-11T04:35:21,949 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T04:35:21,955 DEBUG [M:0;a7bef91497aa:45233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0cd8e0cc600c41ccb605850392a454aa is 52, key is load_balancer_on/state:d/1731299692961/Put/seqid=0 2024-11-11T04:35:21,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42279 is added to blk_1073741851_1036 (size=5056) 2024-11-11T04:35:21,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37773 is added to blk_1073741851_1036 (size=5056) 2024-11-11T04:35:21,960 INFO [M:0;a7bef91497aa:45233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0cd8e0cc600c41ccb605850392a454aa 2024-11-11T04:35:21,966 DEBUG [M:0;a7bef91497aa:45233 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9dd2f5ee937f4d77ac6ab4e1db603029 as hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9dd2f5ee937f4d77ac6ab4e1db603029 2024-11-11T04:35:21,970 INFO [M:0;a7bef91497aa:45233 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9dd2f5ee937f4d77ac6ab4e1db603029, entries=8, sequenceid=56, filesize=5.5 K 2024-11-11T04:35:21,971 DEBUG [M:0;a7bef91497aa:45233 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/db3a5d8d991441639d10424291df3cfa as hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/db3a5d8d991441639d10424291df3cfa 2024-11-11T04:35:21,975 INFO [M:0;a7bef91497aa:45233 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/db3a5d8d991441639d10424291df3cfa, entries=6, sequenceid=56, filesize=6.0 K 2024-11-11T04:35:21,976 DEBUG [M:0;a7bef91497aa:45233 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/33888751c51c4f96a4d3757fa4ec1362 as hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/33888751c51c4f96a4d3757fa4ec1362 
2024-11-11T04:35:21,981 INFO [M:0;a7bef91497aa:45233 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/33888751c51c4f96a4d3757fa4ec1362, entries=1, sequenceid=56, filesize=5.0 K 2024-11-11T04:35:21,982 DEBUG [M:0;a7bef91497aa:45233 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0cd8e0cc600c41ccb605850392a454aa as hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0cd8e0cc600c41ccb605850392a454aa 2024-11-11T04:35:21,987 INFO [M:0;a7bef91497aa:45233 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0cd8e0cc600c41ccb605850392a454aa, entries=1, sequenceid=56, filesize=4.9 K 2024-11-11T04:35:21,988 INFO [M:0;a7bef91497aa:45233 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=56, compaction requested=false 2024-11-11T04:35:21,990 INFO [M:0;a7bef91497aa:45233 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:35:21,990 DEBUG [M:0;a7bef91497aa:45233 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731299721861Disabling compacts and flushes for region at 1731299721861Disabling writes for close at 1731299721861Obtaining lock to block concurrent updates at 1731299721861Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731299721861Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731299721861Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731299721868 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731299721868Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731299721882 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731299721882Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731299721893 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731299721907 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731299721907Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731299721918 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731299721931 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731299721931Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731299721941 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731299721955 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731299721955Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e8dd4c7: reopening flushed file at 1731299721965 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@605408e0: reopening flushed file at 1731299721970 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f715b16: reopening flushed file at 1731299721976 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7987470b: reopening flushed file at 1731299721981 (+5 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=56, compaction requested=false at 1731299721988 (+7 ms)Writing region close event to WAL at 1731299721990 (+2 ms)Closed at 1731299721990 2024-11-11T04:35:21,990 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:21,990 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:21,990 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:21,990 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:21,991 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:21,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37773 is added to blk_1073741847_1031 (size=757) 2024-11-11T04:35:21,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42279 is added to blk_1073741847_1031 (size=757) 2024-11-11T04:35:22,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:22,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:22,942 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:22,942 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:22,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:22,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:22,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:22,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:22,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:22,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:22,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:22,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:22,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:22,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:22,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:22,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:23,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:23,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:23,468 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T04:35:23,470 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:23,470 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:23,470 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:23,470 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:23,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:23,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:23,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:23,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:23,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:23,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:23,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:23,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:23,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:23,492 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:35:24,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:24,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:24,805 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-11T04:35:25,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:25,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:25,869 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/WALs/a7bef91497aa,45233,1731299691590/a7bef91497aa%2C45233%2C1731299691590.1731299692120 after 4002ms 2024-11-11T04:35:25,869 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/WALs/a7bef91497aa,45233,1731299691590/a7bef91497aa%2C45233%2C1731299691590.1731299692120 to hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/oldWALs/a7bef91497aa%2C45233%2C1731299691590.1731299692120 2024-11-11T04:35:25,872 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/MasterData/oldWALs/a7bef91497aa%2C45233%2C1731299691590.1731299692120 to hdfs://localhost:45989/user/jenkins/test-data/791702fb-7d72-0651-26d2-d580f7082a94/oldWALs/a7bef91497aa%2C45233%2C1731299691590.1731299692120$masterlocalwal$ 2024-11-11T04:35:25,872 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T04:35:25,872 INFO [M:0;a7bef91497aa:45233 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-11T04:35:25,872 INFO [M:0;a7bef91497aa:45233 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45233
2024-11-11T04:35:25,873 INFO [M:0;a7bef91497aa:45233 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-11T04:35:25,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:35:25,974 INFO [M:0;a7bef91497aa:45233 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-11T04:35:25,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45233-0x101959c41a00000, quorum=127.0.0.1:61413, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:35:25,977 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@474594d6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T04:35:25,977 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3cea2bec{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T04:35:25,977 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T04:35:25,977 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1dad3af2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T04:35:25,977 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42bf2aaa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.log.dir/,STOPPED}
2024-11-11T04:35:25,979 WARN [BP-1384133828-172.17.0.2-1731299690940 heartbeating to localhost/127.0.0.1:45989 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T04:35:25,979 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T04:35:25,979 WARN [BP-1384133828-172.17.0.2-1731299690940 heartbeating to localhost/127.0.0.1:45989 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1384133828-172.17.0.2-1731299690940 (Datanode Uuid 63425e5e-0ac8-4381-82ea-5b5425f0f552) service to localhost/127.0.0.1:45989 2024-11-11T04:35:25,979 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:35:25,979 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/data/data3/current/BP-1384133828-172.17.0.2-1731299690940 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:35:25,979 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/data/data4/current/BP-1384133828-172.17.0.2-1731299690940 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:35:25,980 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:35:25,981 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1223970{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:35:25,982 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@282cfbc9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:35:25,982 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:35:25,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a397072{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:35:25,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c83d523{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.log.dir/,STOPPED} 2024-11-11T04:35:25,983 WARN [BP-1384133828-172.17.0.2-1731299690940 heartbeating to localhost/127.0.0.1:45989 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:35:25,983 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T04:35:25,983 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:35:25,983 WARN [BP-1384133828-172.17.0.2-1731299690940 heartbeating to localhost/127.0.0.1:45989 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1384133828-172.17.0.2-1731299690940 (Datanode Uuid 5bdc0d2d-5e97-4e14-9cd6-3d3c6eabc44e) service to localhost/127.0.0.1:45989 2024-11-11T04:35:25,984 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:35:25,984 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/data/data1/current/BP-1384133828-172.17.0.2-1731299690940 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:35:25,984 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/cluster_aaae5ed2-a833-51e3-3a29-3cd15d4b1b16/data/data2/current/BP-1384133828-172.17.0.2-1731299690940 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:35:25,989 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e84569b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T04:35:25,990 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@ea144a0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:35:25,990 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:35:25,990 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d60493b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:35:25,990 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6495f923{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.log.dir/,STOPPED} 2024-11-11T04:35:25,996 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T04:35:26,013 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T04:35:26,021 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45989 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45989 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45989 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45989 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:45989 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45989 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45989 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45989 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 452) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=9 (was 7) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=6630 (was 6778) 2024-11-11T04:35:26,028 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=9, ProcessCount=11, AvailableMemoryMB=6630 2024-11-11T04:35:26,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T04:35:26,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.log.dir so I do NOT create it in target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8 2024-11-11T04:35:26,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/88b5afca-deb2-5599-4ad7-aea3a40a678b/hadoop.tmp.dir so I do NOT create it in target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8 2024-11-11T04:35:26,029 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/cluster_c999db2f-a4c1-1db4-9a28-027f8575ecbe, deleteOnExit=true 2024-11-11T04:35:26,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T04:35:26,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/test.cache.data in system properties and HBase conf 2024-11-11T04:35:26,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T04:35:26,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/hadoop.log.dir in system properties and HBase conf 2024-11-11T04:35:26,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T04:35:26,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T04:35:26,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T04:35:26,029 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-11T04:35:26,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T04:35:26,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T04:35:26,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T04:35:26,030 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T04:35:26,030 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T04:35:26,030 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T04:35:26,030 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T04:35:26,030 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T04:35:26,030 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T04:35:26,030 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/nfs.dump.dir in system properties and HBase conf 2024-11-11T04:35:26,030 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/java.io.tmpdir in system properties and HBase conf 2024-11-11T04:35:26,030 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T04:35:26,030 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T04:35:26,030 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T04:35:26,043 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T04:35:26,104 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:35:26,108 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:35:26,109 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:35:26,109 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:35:26,109 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T04:35:26,110 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:35:26,110 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79c156a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:35:26,111 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@232381c8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:35:26,222 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c464107{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/java.io.tmpdir/jetty-localhost-39993-hadoop-hdfs-3_4_1-tests_jar-_-any-4058688059954508355/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T04:35:26,222 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@62558ec9{HTTP/1.1, (http/1.1)}{localhost:39993} 2024-11-11T04:35:26,223 INFO [Time-limited test {}] server.Server(415): Started @184025ms 2024-11-11T04:35:26,235 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T04:35:26,284 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:35:26,287 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:35:26,288 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:35:26,288 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:35:26,288 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:35:26,289 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ac0122b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:35:26,289 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3fa18b90{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:35:26,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-11T04:35:26,399 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@8d4c846{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/java.io.tmpdir/jetty-localhost-43429-hadoop-hdfs-3_4_1-tests_jar-_-any-5737275127687294413/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:35:26,400 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1330929b{HTTP/1.1, (http/1.1)}{localhost:43429} 2024-11-11T04:35:26,400 INFO [Time-limited test {}] server.Server(415): Started @184202ms 2024-11-11T04:35:26,401 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:35:26,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:26,429 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:35:26,432 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:35:26,433 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:35:26,433 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:35:26,433 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T04:35:26,433 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39212263{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:35:26,433 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2fa194e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:35:26,490 WARN [Thread-1646 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/cluster_c999db2f-a4c1-1db4-9a28-027f8575ecbe/data/data1/current/BP-601493080-172.17.0.2-1731299726058/current, will proceed with Du for space computation calculation, 2024-11-11T04:35:26,490 WARN [Thread-1647 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/cluster_c999db2f-a4c1-1db4-9a28-027f8575ecbe/data/data2/current/BP-601493080-172.17.0.2-1731299726058/current, will proceed with Du for space computation calculation, 2024-11-11T04:35:26,507 WARN [Thread-1625 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:35:26,510 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbd529225b98d6870 with lease ID 0x1c5356157e4a08cc: Processing first storage report for DS-34f2b290-9f4b-44fb-b482-9f95507fa8a3 from datanode DatanodeRegistration(127.0.0.1:37899, datanodeUuid=184fd366-2b2a-44b5-8958-3f8c8ec3d828, infoPort=46703, infoSecurePort=0, ipcPort=34661, storageInfo=lv=-57;cid=testClusterID;nsid=1850437526;c=1731299726058) 2024-11-11T04:35:26,510 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd529225b98d6870 with lease ID 0x1c5356157e4a08cc: from storage DS-34f2b290-9f4b-44fb-b482-9f95507fa8a3 node DatanodeRegistration(127.0.0.1:37899, datanodeUuid=184fd366-2b2a-44b5-8958-3f8c8ec3d828, infoPort=46703, infoSecurePort=0, ipcPort=34661, storageInfo=lv=-57;cid=testClusterID;nsid=1850437526;c=1731299726058), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:35:26,510 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbd529225b98d6870 with lease ID 0x1c5356157e4a08cc: Processing first storage report for DS-55f1097f-2756-47c4-9f0d-053fee223424 from datanode DatanodeRegistration(127.0.0.1:37899, datanodeUuid=184fd366-2b2a-44b5-8958-3f8c8ec3d828, infoPort=46703, infoSecurePort=0, ipcPort=34661, storageInfo=lv=-57;cid=testClusterID;nsid=1850437526;c=1731299726058) 2024-11-11T04:35:26,510 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd529225b98d6870 with lease ID 0x1c5356157e4a08cc: from storage DS-55f1097f-2756-47c4-9f0d-053fee223424 node DatanodeRegistration(127.0.0.1:37899, datanodeUuid=184fd366-2b2a-44b5-8958-3f8c8ec3d828, infoPort=46703, infoSecurePort=0, ipcPort=34661, storageInfo=lv=-57;cid=testClusterID;nsid=1850437526;c=1731299726058), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:35:26,546 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@22fd1e24{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/java.io.tmpdir/jetty-localhost-43525-hadoop-hdfs-3_4_1-tests_jar-_-any-15370602809469146430/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:35:26,546 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43498b11{HTTP/1.1, (http/1.1)}{localhost:43525} 2024-11-11T04:35:26,546 INFO [Time-limited test {}] server.Server(415): Started @184349ms 2024-11-11T04:35:26,548 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
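Editor's note: the startup sequence logged above (test system properties, a mini DFS with two DataNodes, and the Jetty endpoints for the NameNode and DataNode web UIs) is what HBaseTestingUtil drives when a test requests a single-master, single-regionserver, two-datanode cluster. The sketch below shows roughly how such a test is written; it assumes the builder-style StartMiniClusterOption and the startMiniCluster/shutdownMiniCluster/createTable entry points that the log references, so treat the exact signatures as illustrative rather than authoritative.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniClusterSketchTest {
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Mirrors the logged options: 1 master, 1 regionserver, 2 datanodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    UTIL.startMiniCluster(option);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // Shut the whole cluster down so the ResourceChecker does not flag leaked threads.
    UTIL.shutdownMiniCluster();
  }

  @Test
  public void testSomethingAgainstTheCluster() throws Exception {
    // Create a throwaway table and touch it, just to prove the cluster is up.
    try (Table table = UTIL.createTable(TableName.valueOf("sketch"), "cf")) {
      // ... exercise the code under test here ...
    }
  }
}
```

A clean shutdownMiniCluster() in the teardown is also what keeps the "Potentially hanging thread" lists earlier in this log short: event loops and IPC client threads left behind by the previous test are exactly what the ResourceChecker reports.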
2024-11-11T04:35:26,642 WARN [Thread-1672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/cluster_c999db2f-a4c1-1db4-9a28-027f8575ecbe/data/data3/current/BP-601493080-172.17.0.2-1731299726058/current, will proceed with Du for space computation calculation, 2024-11-11T04:35:26,642 WARN [Thread-1673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/cluster_c999db2f-a4c1-1db4-9a28-027f8575ecbe/data/data4/current/BP-601493080-172.17.0.2-1731299726058/current, will proceed with Du for space computation calculation, 2024-11-11T04:35:26,657 WARN [Thread-1661 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T04:35:26,659 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3240bd7de2cc26e1 with lease ID 0x1c5356157e4a08cd: Processing first storage report for DS-7f110094-2882-47fe-85b4-ad08318550e9 from datanode DatanodeRegistration(127.0.0.1:44287, datanodeUuid=61e3f1dd-81d1-49a9-a0b8-0b9350008616, infoPort=41459, infoSecurePort=0, ipcPort=43557, storageInfo=lv=-57;cid=testClusterID;nsid=1850437526;c=1731299726058) 2024-11-11T04:35:26,659 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3240bd7de2cc26e1 with lease ID 0x1c5356157e4a08cd: from storage DS-7f110094-2882-47fe-85b4-ad08318550e9 node DatanodeRegistration(127.0.0.1:44287, datanodeUuid=61e3f1dd-81d1-49a9-a0b8-0b9350008616, infoPort=41459, infoSecurePort=0, ipcPort=43557, storageInfo=lv=-57;cid=testClusterID;nsid=1850437526;c=1731299726058), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:35:26,659 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3240bd7de2cc26e1 with lease ID 0x1c5356157e4a08cd: Processing first storage report for DS-9e3499e2-4f92-4c02-9aa3-a984213ca501 from datanode DatanodeRegistration(127.0.0.1:44287, datanodeUuid=61e3f1dd-81d1-49a9-a0b8-0b9350008616, infoPort=41459, infoSecurePort=0, ipcPort=43557, storageInfo=lv=-57;cid=testClusterID;nsid=1850437526;c=1731299726058) 2024-11-11T04:35:26,659 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3240bd7de2cc26e1 with lease ID 0x1c5356157e4a08cd: from storage DS-9e3499e2-4f92-4c02-9aa3-a984213ca501 node DatanodeRegistration(127.0.0.1:44287, datanodeUuid=61e3f1dd-81d1-49a9-a0b8-0b9350008616, infoPort=41459, infoSecurePort=0, ipcPort=43557, storageInfo=lv=-57;cid=testClusterID;nsid=1850437526;c=1731299726058), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:35:26,670 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8 2024-11-11T04:35:26,673 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/cluster_c999db2f-a4c1-1db4-9a28-027f8575ecbe/zookeeper_0, clientPort=53725, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/cluster_c999db2f-a4c1-1db4-9a28-027f8575ecbe/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/cluster_c999db2f-a4c1-1db4-9a28-027f8575ecbe/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T04:35:26,673 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53725 2024-11-11T04:35:26,674 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:35:26,675 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:35:26,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741825_1001 (size=7) 2024-11-11T04:35:26,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741825_1001 (size=7) 2024-11-11T04:35:26,684 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d with version=8 2024-11-11T04:35:26,684 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/hbase-staging 2024-11-11T04:35:26,686 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a7bef91497aa:0 server-side Connection retries=45 2024-11-11T04:35:26,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:35:26,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:35:26,686 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:35:26,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:35:26,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:35:26,686 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T04:35:26,686 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:35:26,687 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46241 2024-11-11T04:35:26,688 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46241 connecting to ZooKeeper ensemble=127.0.0.1:53725 2024-11-11T04:35:26,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:462410x0, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:35:26,695 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46241-0x101959ccab90000 connected 2024-11-11T04:35:26,708 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:35:26,710 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:35:26,711 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:35:26,712 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d, hbase.cluster.distributed=false 2024-11-11T04:35:26,713 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:35:26,713 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46241 2024-11-11T04:35:26,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46241 2024-11-11T04:35:26,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46241 2024-11-11T04:35:26,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46241 2024-11-11T04:35:26,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46241 2024-11-11T04:35:26,729 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a7bef91497aa:0 server-side Connection retries=45 2024-11-11T04:35:26,729 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:35:26,729 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:35:26,729 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:35:26,729 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:35:26,729 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:35:26,729 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T04:35:26,729 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:35:26,730 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34791 2024-11-11T04:35:26,731 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34791 connecting to ZooKeeper ensemble=127.0.0.1:53725 2024-11-11T04:35:26,732 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:35:26,733 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:35:26,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:347910x0, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:35:26,737 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:35:26,737 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34791-0x101959ccab90001 connected 2024-11-11T04:35:26,737 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T04:35:26,738 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T04:35:26,739 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T04:35:26,739 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:35:26,741 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34791 2024-11-11T04:35:26,741 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34791 2024-11-11T04:35:26,742 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34791 2024-11-11T04:35:26,742 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34791 2024-11-11T04:35:26,742 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34791 
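Editor's note: the RpcExecutor lines above reduce to a small dispatch model: a bounded FIFO call queue ("numCallQueues=1, maxQueueLength=30") drained by a fixed pool of handler threads ("handlerCount=3"). The plain-JDK sketch below is only a conceptual illustration of those parameters, not HBase's actual RpcExecutor.

```java
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public final class CallQueueSketch {
  public static void main(String[] args) throws InterruptedException {
    // One bounded FIFO queue, as in "numCallQueues=1, maxQueueLength=30".
    BlockingQueue<Runnable> callQueue = new ArrayBlockingQueue<>(30);

    // Three handler threads draining it, as in "handlerCount=3".
    for (int i = 0; i < 3; i++) {
      Thread handler = new Thread(() -> {
        try {
          while (true) {
            callQueue.take().run(); // block until a call is queued, then execute it
          }
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt(); // allow shutdown via interrupt
        }
      }, "handler-" + i);
      handler.setDaemon(true);
      handler.start();
    }

    // A producer enqueues calls; offer() fails fast once the queue is full,
    // which is the point at which a real server would reject the RPC.
    boolean accepted = callQueue.offer(() -> System.out.println("handled one call"));
    System.out.println("call accepted: " + accepted);
    Thread.sleep(200); // give the daemon handlers a moment before the JVM exits
  }
}
```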
2024-11-11T04:35:26,753 DEBUG [M:0;a7bef91497aa:46241 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a7bef91497aa:46241 2024-11-11T04:35:26,753 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a7bef91497aa,46241,1731299726686 2024-11-11T04:35:26,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:35:26,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:35:26,756 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a7bef91497aa,46241,1731299726686 2024-11-11T04:35:26,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T04:35:26,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:35:26,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:35:26,758 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T04:35:26,759 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a7bef91497aa,46241,1731299726686 from backup master directory 2024-11-11T04:35:26,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a7bef91497aa,46241,1731299726686 2024-11-11T04:35:26,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:35:26,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:35:26,760 WARN [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
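Editor's note: the backup-masters/master znode traffic above is ZooKeeper-based leader election: each master registers itself under /hbase/backup-masters, and the one that wins ownership of the active-master znode removes its backup entry. The following is a rough, generic sketch of that pattern using a plain ZooKeeper client against the ensemble and base znode shown in the log; the znode names under backup-masters, the payload, and the error handling are simplified assumptions, not HBase's ActiveMasterManager.

```java
import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public final class MasterElectionSketch {
  public static void main(String[] args) throws Exception {
    // Ensemble and base znode as reported in the log.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:53725", 30000, event -> { });
    byte[] self = "a7bef91497aa,46241,1731299726686".getBytes(StandardCharsets.UTF_8);
    try {
      // Try to become the active master by creating an ephemeral znode.
      zk.create("/hbase/master", self, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      System.out.println("registered as active master");
    } catch (KeeperException.NodeExistsException e) {
      // Someone else is active; park under backup-masters and watch the active znode.
      zk.create("/hbase/backup-masters/sketch-master", self,
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      zk.exists("/hbase/master", event -> System.out.println("active master changed: " + event));
      System.out.println("registered as backup master");
    } finally {
      // A real master keeps the session open for its lifetime; the sketch just closes it.
      zk.close();
    }
  }
}
```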
2024-11-11T04:35:26,761 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a7bef91497aa,46241,1731299726686 2024-11-11T04:35:26,765 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/hbase.id] with ID: 4f457901-c245-4348-8fb1-e18ca6c89528 2024-11-11T04:35:26,765 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/.tmp/hbase.id 2024-11-11T04:35:26,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741826_1002 (size=42) 2024-11-11T04:35:26,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741826_1002 (size=42) 2024-11-11T04:35:26,771 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/.tmp/hbase.id]:[hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/hbase.id] 2024-11-11T04:35:26,781 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:35:26,781 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T04:35:26,783 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
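Editor's note: the cluster ID sequence above (write hbase.id under .tmp, then move it into place) is the usual write-then-rename trick for an effectively atomic file publish on HDFS: readers either see no file or a complete one. A small sketch of that pattern with the stock Hadoop FileSystem API follows; the paths and UUID are made up for illustration, only the namenode port is taken from the log.

```java
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ClusterIdPublishSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:42763"); // namenode port from the log
    FileSystem fs = FileSystem.get(conf);

    Path rootDir = new Path("/user/jenkins/test-data/sketch");
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path id = new Path(rootDir, "hbase.id");

    // 1. Write the ID to a temporary file first; readers never see a half-written file.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeUTF(UUID.randomUUID().toString());
    }
    // 2. Rename into the final location; HDFS rename is atomic within a namespace.
    if (!fs.rename(tmp, id)) {
      throw new java.io.IOException("failed to publish " + id);
    }
    System.out.println("cluster id published at " + id);
  }
}
```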
2024-11-11T04:35:26,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:35:26,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:35:26,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741827_1003 (size=196) 2024-11-11T04:35:26,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741827_1003 (size=196) 2024-11-11T04:35:26,795 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T04:35:26,796 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T04:35:26,796 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:35:26,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741828_1004 (size=1189) 2024-11-11T04:35:26,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741828_1004 (size=1189) 2024-11-11T04:35:26,805 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store 2024-11-11T04:35:26,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:35:26,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:35:26,812 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:35:26,812 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T04:35:26,812 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:35:26,812 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:35:26,812 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T04:35:26,812 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:35:26,812 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
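Editor's note: the master:store descriptor printed above reads like any other table definition: an in-memory 'info' family keeping 3 versions with ROWCOL blooms, ROW_INDEX_V1 encoding and 8 KB blocks, plus single-version 'proc', 'rs' and 'state' families with ROW blooms and 64 KB blocks. For reference, an equivalent client-side construction with the public descriptor builders looks roughly like this (shown for 'info' and 'proc' only; the table name is a stand-in, since master:store is an internal region you would not create yourself):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class StoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("sketch"))
        // 'info': VERSIONS=3, IN_MEMORY=true, BLOOMFILTER=ROWCOL,
        // DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8192)
            .build())
        // 'proc': single version, ROW bloom, no encoding, default 64 KB blocks
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(65536)
            .build())
        .build();
  }

  public static void main(String[] args) {
    System.out.println(build());
  }
}
```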
2024-11-11T04:35:26,812 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731299726812Disabling compacts and flushes for region at 1731299726812Disabling writes for close at 1731299726812Writing region close event to WAL at 1731299726812Closed at 1731299726812 2024-11-11T04:35:26,813 WARN [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/.initializing 2024-11-11T04:35:26,813 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/WALs/a7bef91497aa,46241,1731299726686 2024-11-11T04:35:26,816 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C46241%2C1731299726686, suffix=, logDir=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/WALs/a7bef91497aa,46241,1731299726686, archiveDir=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/oldWALs, maxLogs=10 2024-11-11T04:35:26,816 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C46241%2C1731299726686.1731299726816 2024-11-11T04:35:26,820 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/WALs/a7bef91497aa,46241,1731299726686/a7bef91497aa%2C46241%2C1731299726686.1731299726816 2024-11-11T04:35:26,821 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46703:46703),(127.0.0.1/127.0.0.1:41459:41459)] 2024-11-11T04:35:26,821 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:35:26,821 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:35:26,822 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:35:26,822 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:35:26,823 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:35:26,824 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T04:35:26,824 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:35:26,824 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:35:26,824 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:35:26,825 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T04:35:26,825 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:35:26,826 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:35:26,826 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:35:26,827 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T04:35:26,827 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:35:26,827 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:35:26,827 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:35:26,828 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T04:35:26,828 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:35:26,829 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:35:26,829 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:35:26,829 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:35:26,830 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:35:26,831 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:35:26,831 DEBUG [master/a7bef91497aa:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:35:26,832 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T04:35:26,833 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:35:26,835 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:35:26,835 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=772403, jitterRate=-0.01783926784992218}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T04:35:26,836 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731299726822Initializing all the Stores at 1731299726822Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299726822Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299726822Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299726822Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299726822Cleaning up temporary data from old regions at 1731299726831 (+9 ms)Region opened successfully at 1731299726836 (+5 ms) 2024-11-11T04:35:26,836 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T04:35:26,839 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@460a1618, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7bef91497aa/172.17.0.2:0 2024-11-11T04:35:26,840 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T04:35:26,840 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T04:35:26,840 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T04:35:26,840 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T04:35:26,841 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T04:35:26,841 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T04:35:26,841 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T04:35:26,843 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T04:35:26,844 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T04:35:26,845 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T04:35:26,846 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T04:35:26,846 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T04:35:26,849 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T04:35:26,849 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T04:35:26,850 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T04:35:26,852 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T04:35:26,852 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T04:35:26,853 DEBUG 
[master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T04:35:26,855 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T04:35:26,856 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T04:35:26,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:35:26,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:35:26,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:35:26,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:35:26,859 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a7bef91497aa,46241,1731299726686, sessionid=0x101959ccab90000, setting cluster-up flag (Was=false) 2024-11-11T04:35:26,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:35:26,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:35:26,868 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T04:35:26,869 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7bef91497aa,46241,1731299726686 2024-11-11T04:35:26,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:35:26,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:35:26,879 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T04:35:26,879 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7bef91497aa,46241,1731299726686 2024-11-11T04:35:26,880 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T04:35:26,882 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T04:35:26,882 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T04:35:26,882 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T04:35:26,882 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a7bef91497aa,46241,1731299726686 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T04:35:26,883 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:35:26,883 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:35:26,884 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:35:26,884 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:35:26,884 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a7bef91497aa:0, corePoolSize=10, maxPoolSize=10 2024-11-11T04:35:26,884 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:35:26,884 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a7bef91497aa:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:35:26,884 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a7bef91497aa:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T04:35:26,884 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731299756884 2024-11-11T04:35:26,885 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T04:35:26,885 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T04:35:26,885 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T04:35:26,885 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T04:35:26,885 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T04:35:26,885 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T04:35:26,885 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T04:35:26,885 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:35:26,885 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T04:35:26,885 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T04:35:26,885 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T04:35:26,885 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T04:35:26,886 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T04:35:26,886 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T04:35:26,886 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299726886,5,FailOnTimeoutGroup] 2024-11-11T04:35:26,886 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299726886,5,FailOnTimeoutGroup] 2024-11-11T04:35:26,886 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T04:35:26,886 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T04:35:26,886 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T04:35:26,886 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T04:35:26,886 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:35:26,886 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T04:35:26,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741831_1007 (size=1321) 2024-11-11T04:35:26,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741831_1007 (size=1321) 2024-11-11T04:35:26,894 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T04:35:26,894 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d 2024-11-11T04:35:26,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:35:26,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:35:26,904 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:35:26,905 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:35:26,906 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:35:26,906 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:35:26,907 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:35:26,907 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T04:35:26,908 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T04:35:26,908 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:35:26,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:35:26,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:35:26,909 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:35:26,909 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:35:26,909 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:35:26,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T04:35:26,910 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:35:26,910 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:35:26,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:35:26,911 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T04:35:26,912 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740 2024-11-11T04:35:26,912 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740 2024-11-11T04:35:26,913 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T04:35:26,913 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T04:35:26,914 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
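
The FlushLargeStoresPolicy fallback in the record above is plain division, as its own message states: with no hbase.hregion.percolumnfamilyflush.size.lower.bound in the table descriptor, the per-family lower bound is the region's memstore flush (heap) size divided by the number of column families. Worked from the values in this log:

    master:store (4 families: info, proc, rs, state; flushSize=134217728):
        134217728 / 4 = 33554432   -> the "(32.0 M)" fallback and "flushSizeLowerBound=33554432" logged earlier
    hbase:meta   (4 families: info, ns, rep_barrier, table):
        "(16.0 M)" fallback  ->  flushSizeLowerBound=16777216 in the next record
        (16777216 x 4 = 67108864, i.e. a 64 MB memstore flush size is implied for hbase:meta)
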
2024-11-11T04:35:26,915 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T04:35:26,916 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:35:26,917 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821050, jitterRate=0.044020116329193115}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T04:35:26,917 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731299726904Initializing all the Stores at 1731299726905 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299726905Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299726905Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299726905Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299726905Cleaning up temporary data from old regions at 1731299726913 (+8 ms)Region opened successfully at 1731299726917 (+4 ms) 2024-11-11T04:35:26,918 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:35:26,918 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T04:35:26,918 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T04:35:26,918 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:35:26,918 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:35:26,918 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T04:35:26,918 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731299726917Disabling compacts and flushes for region at 1731299726917Disabling writes for close at 1731299726918 (+1 ms)Writing 
region close event to WAL at 1731299726918Closed at 1731299726918 2024-11-11T04:35:26,919 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:35:26,919 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T04:35:26,919 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T04:35:26,920 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T04:35:26,922 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T04:35:26,944 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.HRegionServer(746): ClusterId : 4f457901-c245-4348-8fb1-e18ca6c89528 2024-11-11T04:35:26,944 DEBUG [RS:0;a7bef91497aa:34791 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T04:35:26,946 DEBUG [RS:0;a7bef91497aa:34791 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T04:35:26,946 DEBUG [RS:0;a7bef91497aa:34791 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T04:35:26,949 DEBUG [RS:0;a7bef91497aa:34791 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T04:35:26,949 DEBUG [RS:0;a7bef91497aa:34791 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@147f035, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7bef91497aa/172.17.0.2:0 2024-11-11T04:35:26,961 DEBUG [RS:0;a7bef91497aa:34791 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a7bef91497aa:34791 2024-11-11T04:35:26,961 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T04:35:26,961 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T04:35:26,961 DEBUG [RS:0;a7bef91497aa:34791 {}] regionserver.HRegionServer(832): About to register with Master. 
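
The cluster summary the balancer printed a few records back (active master, live/dead region server counts, regions in transition) is the same information a client can read back once the master and region server finish the registration happening here. A minimal sketch using the standard HBase 2.x client API; the quorum address 127.0.0.1:53725 is taken from this log, everything else is ordinary client usage and not part of the test itself:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStatusSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // quorum host from the log
        conf.set("hbase.zookeeper.property.clientPort", "53725"); // quorum port from the log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // The same counters the balancer logged (master, live servers, regions in transition).
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("Active master:         " + metrics.getMasterName());
          System.out.println("Live region servers:   " + metrics.getLiveServerMetrics().size());
          System.out.println("Regions in transition: " + metrics.getRegionStatesInTransition().size());
        }
      }
    }
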
2024-11-11T04:35:26,962 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.HRegionServer(2659): reportForDuty to master=a7bef91497aa,46241,1731299726686 with port=34791, startcode=1731299726729 2024-11-11T04:35:26,962 DEBUG [RS:0;a7bef91497aa:34791 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T04:35:26,964 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54981, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T04:35:26,965 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46241 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a7bef91497aa,34791,1731299726729 2024-11-11T04:35:26,965 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46241 {}] master.ServerManager(517): Registering regionserver=a7bef91497aa,34791,1731299726729 2024-11-11T04:35:26,967 DEBUG [RS:0;a7bef91497aa:34791 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d 2024-11-11T04:35:26,967 DEBUG [RS:0;a7bef91497aa:34791 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42763 2024-11-11T04:35:26,967 DEBUG [RS:0;a7bef91497aa:34791 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T04:35:26,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:35:26,969 DEBUG [RS:0;a7bef91497aa:34791 {}] zookeeper.ZKUtil(111): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a7bef91497aa,34791,1731299726729 2024-11-11T04:35:26,969 WARN [RS:0;a7bef91497aa:34791 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T04:35:26,969 INFO [RS:0;a7bef91497aa:34791 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:35:26,970 DEBUG [RS:0;a7bef91497aa:34791 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/WALs/a7bef91497aa,34791,1731299726729 2024-11-11T04:35:26,970 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a7bef91497aa,34791,1731299726729] 2024-11-11T04:35:26,973 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T04:35:26,975 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T04:35:26,976 INFO [RS:0;a7bef91497aa:34791 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T04:35:26,976 INFO [RS:0;a7bef91497aa:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
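
The ZKUtil and RegionServerTracker records above describe the standard registration handshake: the region server publishes an ephemeral znode under /hbase/rs, and the master holds a children watch on /hbase/rs, so the creation shows up as the NodeChildrenChanged events seen throughout this log. A minimal sketch of that mechanism with the plain Apache ZooKeeper client (not HBase's ZKWatcher/RecoverableZooKeeper); znode name and quorum address are copied from the log, and this is for illustration only, not something to run against a live cluster:

    import java.util.List;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class RsZnodeSketch {
      public static void main(String[] args) throws Exception {
        // Connect to the quorum seen in the log; the 30s session timeout is an arbitrary choice.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:53725", 30_000,
            (WatchedEvent event) ->
                System.out.println("event " + event.getType() + " on " + event.getPath()));
        // A region server announces itself with an ephemeral znode under /hbase/rs;
        // the node name below is the server name from this log.
        zk.create("/hbase/rs/a7bef91497aa,34791,1731299726729", new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        // A children watch on /hbase/rs (what the master's RegionServerTracker keeps) is what
        // turns that creation into a NodeChildrenChanged notification.
        List<String> servers = zk.getChildren("/hbase/rs", true);
        System.out.println("registered region servers: " + servers);
        zk.close();
      }
    }

Because the znode is ephemeral, it disappears when the session ends, which is how a crashed region server is noticed by the watcher.
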
2024-11-11T04:35:26,976 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T04:35:26,977 INFO [RS:0;a7bef91497aa:34791 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T04:35:26,977 INFO [RS:0;a7bef91497aa:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T04:35:26,977 DEBUG [RS:0;a7bef91497aa:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:35:26,977 DEBUG [RS:0;a7bef91497aa:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:35:26,977 DEBUG [RS:0;a7bef91497aa:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:35:26,977 DEBUG [RS:0;a7bef91497aa:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:35:26,977 DEBUG [RS:0;a7bef91497aa:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:35:26,977 DEBUG [RS:0;a7bef91497aa:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a7bef91497aa:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:35:26,977 DEBUG [RS:0;a7bef91497aa:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:35:26,977 DEBUG [RS:0;a7bef91497aa:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:35:26,978 DEBUG [RS:0;a7bef91497aa:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:35:26,978 DEBUG [RS:0;a7bef91497aa:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:35:26,978 DEBUG [RS:0;a7bef91497aa:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:35:26,978 DEBUG [RS:0;a7bef91497aa:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:35:26,978 DEBUG [RS:0;a7bef91497aa:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a7bef91497aa:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:35:26,978 DEBUG [RS:0;a7bef91497aa:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:35:26,978 INFO [RS:0;a7bef91497aa:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
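
The corePoolSize/maxPoolSize pairs in the executor.ExecutorService records above are the two standard sizing knobs of a java.util.concurrent thread pool: threads kept alive when idle versus the upper bound allowed under load. A plain-JDK illustration of what those numbers mean (this is not HBase's own executor.ExecutorService class):

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutorSizingSketch {
      public static void main(String[] args) {
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1,                        // corePoolSize, as in RS_OPEN_REGION above
            1,                        // maxPoolSize
            60, TimeUnit.SECONDS,     // keep-alive for threads above the core size
            new LinkedBlockingQueue<>());
        // With an unbounded queue, the pool never grows past corePoolSize:
        // extra submissions simply wait in the queue.
        openRegionPool.submit(() -> System.out.println("open-region task"));
        openRegionPool.shutdown();
      }
    }
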
2024-11-11T04:35:26,978 INFO [RS:0;a7bef91497aa:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:35:26,978 INFO [RS:0;a7bef91497aa:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:35:26,978 INFO [RS:0;a7bef91497aa:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T04:35:26,978 INFO [RS:0;a7bef91497aa:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T04:35:26,979 INFO [RS:0;a7bef91497aa:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,34791,1731299726729-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:35:27,001 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T04:35:27,001 INFO [RS:0;a7bef91497aa:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,34791,1731299726729-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:35:27,001 INFO [RS:0;a7bef91497aa:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:35:27,001 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.Replication(171): a7bef91497aa,34791,1731299726729 started 2024-11-11T04:35:27,016 INFO [RS:0;a7bef91497aa:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:35:27,016 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.HRegionServer(1482): Serving as a7bef91497aa,34791,1731299726729, RpcServer on a7bef91497aa/172.17.0.2:34791, sessionid=0x101959ccab90001 2024-11-11T04:35:27,016 DEBUG [RS:0;a7bef91497aa:34791 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T04:35:27,016 DEBUG [RS:0;a7bef91497aa:34791 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a7bef91497aa,34791,1731299726729 2024-11-11T04:35:27,016 DEBUG [RS:0;a7bef91497aa:34791 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7bef91497aa,34791,1731299726729' 2024-11-11T04:35:27,016 DEBUG [RS:0;a7bef91497aa:34791 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T04:35:27,017 DEBUG [RS:0;a7bef91497aa:34791 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T04:35:27,017 DEBUG [RS:0;a7bef91497aa:34791 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T04:35:27,017 DEBUG [RS:0;a7bef91497aa:34791 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T04:35:27,017 DEBUG [RS:0;a7bef91497aa:34791 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a7bef91497aa,34791,1731299726729 2024-11-11T04:35:27,017 DEBUG [RS:0;a7bef91497aa:34791 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7bef91497aa,34791,1731299726729' 2024-11-11T04:35:27,017 DEBUG [RS:0;a7bef91497aa:34791 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T04:35:27,018 DEBUG 
[RS:0;a7bef91497aa:34791 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T04:35:27,018 DEBUG [RS:0;a7bef91497aa:34791 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T04:35:27,018 INFO [RS:0;a7bef91497aa:34791 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T04:35:27,018 INFO [RS:0;a7bef91497aa:34791 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T04:35:27,072 WARN [a7bef91497aa:46241 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-11T04:35:27,120 INFO [RS:0;a7bef91497aa:34791 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C34791%2C1731299726729, suffix=, logDir=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/WALs/a7bef91497aa,34791,1731299726729, archiveDir=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/oldWALs, maxLogs=32 2024-11-11T04:35:27,121 INFO [RS:0;a7bef91497aa:34791 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C34791%2C1731299726729.1731299727120 2024-11-11T04:35:27,126 INFO [RS:0;a7bef91497aa:34791 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/WALs/a7bef91497aa,34791,1731299726729/a7bef91497aa%2C34791%2C1731299726729.1731299727120 2024-11-11T04:35:27,127 DEBUG [RS:0;a7bef91497aa:34791 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41459:41459),(127.0.0.1/127.0.0.1:46703:46703)] 2024-11-11T04:35:27,322 DEBUG [a7bef91497aa:46241 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T04:35:27,323 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a7bef91497aa,34791,1731299726729 2024-11-11T04:35:27,324 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7bef91497aa,34791,1731299726729, state=OPENING 2024-11-11T04:35:27,326 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T04:35:27,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:35:27,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:35:27,328 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T04:35:27,328 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:35:27,328 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:35:27,328 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a7bef91497aa,34791,1731299726729}] 2024-11-11T04:35:27,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:27,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:27,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T04:35:27,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T04:35:27,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-11T04:35:27,482 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T04:35:27,484 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53499, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T04:35:27,487 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T04:35:27,488 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:35:27,490 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C34791%2C1731299726729.meta, suffix=.meta, logDir=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/WALs/a7bef91497aa,34791,1731299726729, 
archiveDir=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/oldWALs, maxLogs=32 2024-11-11T04:35:27,490 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C34791%2C1731299726729.meta.1731299727490.meta 2024-11-11T04:35:27,495 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/WALs/a7bef91497aa,34791,1731299726729/a7bef91497aa%2C34791%2C1731299726729.meta.1731299727490.meta 2024-11-11T04:35:27,500 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41459:41459),(127.0.0.1/127.0.0.1:46703:46703)] 2024-11-11T04:35:27,508 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:35:27,508 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T04:35:27,508 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T04:35:27,509 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
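
The entries above show the region server loading the MultiRowMutationEndpoint coprocessor from the table descriptor (HTD) of hbase:meta while opening the meta region. For reference, a minimal sketch of attaching that same endpoint to a user table's descriptor, assuming the standard HBase 2.x+ client API; the class name CoprocessorDescriptorSketch, the table name "example", and the "info" family are placeholders, not taken from this run:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorDescriptorSketch {
      // Builds a descriptor for a hypothetical "example" table that carries the
      // same endpoint coprocessor that the hbase:meta descriptor carries above.
      static TableDescriptor withMultiRowMutation() throws IOException {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
      }
    }
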
2024-11-11T04:35:27,509 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T04:35:27,509 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:35:27,509 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T04:35:27,509 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T04:35:27,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:35:27,511 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:35:27,511 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:35:27,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:35:27,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T04:35:27,512 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T04:35:27,512 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:35:27,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:35:27,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:35:27,514 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:35:27,514 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:35:27,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:35:27,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T04:35:27,515 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:35:27,515 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:35:27,515 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
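
The CompactionConfiguration lines above print the effective compaction settings for each column family of hbase:meta (minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms, jitter 0.5). A minimal sketch of how those values are expressed as configuration properties, assuming the standard HBase property names; it only restates what the log already shows and is not part of this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);          // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);         // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);   // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hregion.majorcompaction", 604800000L); // major period, 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        System.out.println(conf.get("hbase.hstore.compaction.ratio"));
      }
    }
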
2024-11-11T04:35:27,515 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T04:35:27,516 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740 2024-11-11T04:35:27,517 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740 2024-11-11T04:35:27,518 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T04:35:27,518 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T04:35:27,518 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T04:35:27,520 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T04:35:27,520 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=815583, jitterRate=0.037068456411361694}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T04:35:27,520 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T04:35:27,521 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731299727509Writing region info on filesystem at 1731299727509Initializing all the Stores at 1731299727510 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299727510Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299727510Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299727510Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299727510Cleaning up temporary data from old regions at 1731299727518 (+8 ms)Running coprocessor post-open hooks at 1731299727520 (+2 ms)Region opened successfully at 1731299727521 (+1 ms) 2024-11-11T04:35:27,522 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731299727481 2024-11-11T04:35:27,525 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T04:35:27,525 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T04:35:27,525 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a7bef91497aa,34791,1731299726729 2024-11-11T04:35:27,526 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7bef91497aa,34791,1731299726729, state=OPEN 2024-11-11T04:35:27,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:35:27,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:35:27,532 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a7bef91497aa,34791,1731299726729 2024-11-11T04:35:27,532 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:35:27,532 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:35:27,535 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T04:35:27,535 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a7bef91497aa,34791,1731299726729 in 204 msec 2024-11-11T04:35:27,537 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T04:35:27,537 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 616 msec 2024-11-11T04:35:27,538 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:35:27,538 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T04:35:27,540 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T04:35:27,540 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7bef91497aa,34791,1731299726729, seqNum=-1] 2024-11-11T04:35:27,540 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:35:27,542 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40429, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:35:27,547 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 664 msec 2024-11-11T04:35:27,547 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731299727547, completionTime=-1 2024-11-11T04:35:27,547 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T04:35:27,547 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T04:35:27,549 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T04:35:27,549 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731299787549 2024-11-11T04:35:27,549 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731299847549 2024-11-11T04:35:27,549 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-11T04:35:27,550 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,46241,1731299726686-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:35:27,550 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,46241,1731299726686-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:35:27,550 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,46241,1731299726686-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:35:27,550 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a7bef91497aa:46241, period=300000, unit=MILLISECONDS is enabled. 
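
Each "Chore ScheduledChore name=..., period=..., unit=... is enabled" entry above corresponds to a chore scheduled on the master's or region server's ChoreService. A minimal sketch of that pattern, assuming the public ScheduledChore/ChoreService API; the chore name, 60-second period, and the trivial stopper are illustrative only:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        // Trivial stopper so the sketch is self-contained.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped = false;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Illustrative chore: name "ExampleChore", runs every 60 seconds.
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60, 0, TimeUnit.SECONDS) {
          @Override protected void chore() {
            // periodic work goes here
          }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(chore); // logged as "... is enabled." like the entries above
        service.shutdown();           // stop the chore pool when done
      }
    }
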
2024-11-11T04:35:27,550 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T04:35:27,550 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T04:35:27,551 DEBUG [master/a7bef91497aa:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T04:35:27,553 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.792sec 2024-11-11T04:35:27,553 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T04:35:27,554 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T04:35:27,554 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T04:35:27,554 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T04:35:27,554 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T04:35:27,554 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,46241,1731299726686-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:35:27,554 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,46241,1731299726686-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T04:35:27,556 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T04:35:27,556 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T04:35:27,556 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,46241,1731299726686-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
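
The entries that follow show the test client bootstrapping a connection (fetching the cluster id, the meta region location, and the master stub through the connection registry) and then asking the master to disable the balancer. A minimal client-side sketch of the same sequence, assuming the standard Connection/Admin API; the ZooKeeper quorum value is a placeholder, not the minicluster's actual ephemeral address:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientConnectionSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // placeholder quorum
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Mirrors the "set balanceSwitch=false" call recorded a little further down.
          admin.balancerSwitch(false, false);
        }
      }
    }
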
2024-11-11T04:35:27,644 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76117c9c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:35:27,644 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a7bef91497aa,46241,-1 for getting cluster id 2024-11-11T04:35:27,644 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T04:35:27,646 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4f457901-c245-4348-8fb1-e18ca6c89528' 2024-11-11T04:35:27,647 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T04:35:27,647 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4f457901-c245-4348-8fb1-e18ca6c89528" 2024-11-11T04:35:27,647 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10268372, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:35:27,647 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a7bef91497aa,46241,-1] 2024-11-11T04:35:27,647 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T04:35:27,648 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:35:27,649 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57704, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T04:35:27,650 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ed5ac32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:35:27,650 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T04:35:27,651 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7bef91497aa,34791,1731299726729, seqNum=-1] 2024-11-11T04:35:27,651 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:35:27,652 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47694, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:35:27,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a7bef91497aa,46241,1731299726686 2024-11-11T04:35:27,654 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:35:27,657 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T04:35:27,657 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T04:35:27,658 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is a7bef91497aa,46241,1731299726686 2024-11-11T04:35:27,658 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@263327be 2024-11-11T04:35:27,658 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T04:35:27,659 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57712, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T04:35:27,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T04:35:27,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-11T04:35:27,659 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T04:35:27,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T04:35:27,662 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T04:35:27,662 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:35:27,662 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-11T04:35:27,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T04:35:27,663 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T04:35:27,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741835_1011 (size=405) 2024-11-11T04:35:27,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741835_1011 (size=405) 2024-11-11T04:35:27,671 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => aa338dfb527d83c5946fe75777997918, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d 2024-11-11T04:35:27,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741836_1012 (size=88) 2024-11-11T04:35:27,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741836_1012 (size=88) 2024-11-11T04:35:27,677 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:35:27,677 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing aa338dfb527d83c5946fe75777997918, disabling compactions & flushes 2024-11-11T04:35:27,677 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918. 2024-11-11T04:35:27,677 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918. 2024-11-11T04:35:27,677 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918. after waiting 0 ms 2024-11-11T04:35:27,677 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918. 
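
The create-table request above, together with the TableDescriptorChecker warnings, shows the test asking for a table whose MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) are deliberately tiny so that flushes and splits happen quickly. A hedged sketch of how such a request looks from the client side; in this run the small values may equally have been set on the cluster Configuration ("hbase.hregion.max.filesize", "hbase.hregion.memstore.flush.size") rather than on the descriptor itself, and the helper class and method names are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      static void create(Admin admin) throws IOException {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1).build())
            // 768 KB max file size and 8 KB flush size are far below the defaults,
            // which is exactly what the checker above warns about.
            .setMaxFileSize(786432L)
            .setMemStoreFlushSize(8192L)
            .build();
        admin.createTable(td);
      }
    }
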
2024-11-11T04:35:27,677 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918. 2024-11-11T04:35:27,677 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for aa338dfb527d83c5946fe75777997918: Waiting for close lock at 1731299727677Disabling compacts and flushes for region at 1731299727677Disabling writes for close at 1731299727677Writing region close event to WAL at 1731299727677Closed at 1731299727677 2024-11-11T04:35:27,678 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T04:35:27,679 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731299727678"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731299727678"}]},"ts":"1731299727678"} 2024-11-11T04:35:27,681 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-11T04:35:27,682 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T04:35:27,682 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731299727682"}]},"ts":"1731299727682"} 2024-11-11T04:35:27,684 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-11T04:35:27,684 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=aa338dfb527d83c5946fe75777997918, ASSIGN}] 2024-11-11T04:35:27,686 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=aa338dfb527d83c5946fe75777997918, ASSIGN 2024-11-11T04:35:27,687 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=aa338dfb527d83c5946fe75777997918, ASSIGN; state=OFFLINE, location=a7bef91497aa,34791,1731299726729; forceNewPlan=false, retain=false 2024-11-11T04:35:27,837 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=aa338dfb527d83c5946fe75777997918, regionState=OPENING, regionLocation=a7bef91497aa,34791,1731299726729 2024-11-11T04:35:27,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=aa338dfb527d83c5946fe75777997918, ASSIGN because future has completed 2024-11-11T04:35:27,840 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure aa338dfb527d83c5946fe75777997918, server=a7bef91497aa,34791,1731299726729}] 2024-11-11T04:35:27,997 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918. 2024-11-11T04:35:27,997 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => aa338dfb527d83c5946fe75777997918, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:35:27,997 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling aa338dfb527d83c5946fe75777997918 2024-11-11T04:35:27,997 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:35:27,997 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for aa338dfb527d83c5946fe75777997918 2024-11-11T04:35:27,997 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for aa338dfb527d83c5946fe75777997918 2024-11-11T04:35:27,999 INFO [StoreOpener-aa338dfb527d83c5946fe75777997918-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region aa338dfb527d83c5946fe75777997918 2024-11-11T04:35:28,000 INFO [StoreOpener-aa338dfb527d83c5946fe75777997918-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region aa338dfb527d83c5946fe75777997918 columnFamilyName info 2024-11-11T04:35:28,000 DEBUG [StoreOpener-aa338dfb527d83c5946fe75777997918-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:35:28,000 INFO [StoreOpener-aa338dfb527d83c5946fe75777997918-1 {}] regionserver.HStore(327): Store=aa338dfb527d83c5946fe75777997918/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:35:28,001 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for aa338dfb527d83c5946fe75777997918 2024-11-11T04:35:28,001 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918 2024-11-11T04:35:28,002 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918 2024-11-11T04:35:28,002 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for aa338dfb527d83c5946fe75777997918 2024-11-11T04:35:28,002 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for aa338dfb527d83c5946fe75777997918 2024-11-11T04:35:28,003 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for aa338dfb527d83c5946fe75777997918 2024-11-11T04:35:28,005 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:35:28,006 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened aa338dfb527d83c5946fe75777997918; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=700720, jitterRate=-0.1089896708726883}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T04:35:28,006 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for aa338dfb527d83c5946fe75777997918 2024-11-11T04:35:28,006 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for aa338dfb527d83c5946fe75777997918: Running coprocessor pre-open hook at 1731299727997Writing region info on filesystem at 1731299727997Initializing all the Stores at 1731299727998 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299727998Cleaning up temporary data from old regions at 1731299728002 (+4 ms)Running coprocessor post-open hooks at 1731299728006 (+4 ms)Region opened successfully at 1731299728006 2024-11-11T04:35:28,007 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918., pid=6, masterSystemTime=1731299727993 2024-11-11T04:35:28,010 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918. 2024-11-11T04:35:28,010 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918. 2024-11-11T04:35:28,011 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=aa338dfb527d83c5946fe75777997918, regionState=OPEN, openSeqNum=2, regionLocation=a7bef91497aa,34791,1731299726729 2024-11-11T04:35:28,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure aa338dfb527d83c5946fe75777997918, server=a7bef91497aa,34791,1731299726729 because future has completed 2024-11-11T04:35:28,017 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T04:35:28,017 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure aa338dfb527d83c5946fe75777997918, server=a7bef91497aa,34791,1731299726729 in 174 msec 2024-11-11T04:35:28,019 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T04:35:28,019 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=aa338dfb527d83c5946fe75777997918, ASSIGN in 333 msec 2024-11-11T04:35:28,020 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T04:35:28,020 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731299728020"}]},"ts":"1731299728020"} 2024-11-11T04:35:28,022 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-11T04:35:28,023 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T04:35:28,025 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, 
hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 364 msec 2024-11-11T04:35:28,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:28,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-11T04:35:32,959 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-11T04:35:32,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
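The recurring "Failed invocation for hdfs://..." warnings above come from the WAL close path: before giving up the old writer, the close thread asks HDFS whether the file is already closed, and it does so through reflection (hence the InvocationTargetException wrapper around the real "Filesystem closed" IOException thrown by a DFSClient that has already been shut down). In this run the probe is retried about once a second while the writer close is pending, and every attempt fails the same way. Below is a rough, hypothetical sketch of that probe-and-retry pattern; it is not the actual RecoverLeaseFSUtils code, and the class and method names are made up for illustration.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper (not RecoverLeaseFSUtils itself): probe
// DistributedFileSystem.isFileClosed(Path) reflectively, so the caller also
// compiles against Hadoop versions that lack the method, and retry until the
// file is reported closed or the attempt budget runs out.
public final class IsFileClosedProbe {

  public static boolean waitUntilClosed(FileSystem fs, Path wal,
      int maxAttempts, long pauseMs) throws InterruptedException {
    Method isFileClosed;
    try {
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // this FileSystem cannot answer; fall back to plain lease recovery
    }
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, wal)) {
          return true;
        }
      } catch (InvocationTargetException | IllegalAccessException e) {
        // e.g. "java.io.IOException: Filesystem closed" once the DFSClient has
        // been shut down; log and retry, which is what produces the repeated
        // WARN lines above.
        System.err.println("Failed invocation for " + wal + ": " + e.getCause());
      }
      Thread.sleep(pauseMs);
    }
    return false;
  }

  private IsFileClosedProbe() {
  }
}
```

Using reflection keeps the check optional: if the filesystem does not expose isFileClosed, the sketch simply reports "unknown" instead of failing the close path.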
2024-11-11T04:35:32,989 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-11T04:35:32,989 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling'
2024-11-11T04:35:37,456 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-11T04:35:37,457 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-11T04:35:37,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-11T04:35:37,457 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-11T04:35:37,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T04:35:37,457 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-11T04:35:37,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-11T04:35:37,705 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
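The "Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed" line above is the client-side acknowledgement of CreateTableProcedure pid=4 finishing. For reference, a table with a single 'info' family and the 64 KB block size shown in the descriptor earlier could be created from client code roughly as follows; this is a hedged sketch using the standard HBase Admin API, and the connection settings and exact family options are assumptions rather than values taken from the test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: create the test table with one column family, mirroring the
// 64 KB BLOCKSIZE shown in the descriptor logged above.
public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost"); // placeholder, not from the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setBlocksize(65536) // 64 KB, as in the logged descriptor
              .build())
          .build();
      admin.createTable(table); // the master runs this as a CreateTableProcedure
    }
  }
}
```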
2024-11-11T04:35:37,705 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-11T04:35:37,708 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T04:35:37,708 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.
2024-11-11T04:35:37,711 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918., hostname=a7bef91497aa,34791,1731299726729, seqNum=2]
2024-11-11T04:35:37,717 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T04:35:37,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T04:35:37,723 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-11T04:35:37,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-11T04:35:37,725 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-11T04:35:37,726 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-11T04:35:37,886 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34791 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-11T04:35:37,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.
2024-11-11T04:35:37,887 INFO [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing aa338dfb527d83c5946fe75777997918 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-11T04:35:37,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp/info/844c18e720f74ff38811e4a962cf3d90 is 1080, key is row0001/info:/1731299737712/Put/seqid=0
2024-11-11T04:35:37,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741837_1013 (size=6033)
2024-11-11T04:35:37,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741837_1013 (size=6033)
2024-11-11T04:35:37,909 INFO [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp/info/844c18e720f74ff38811e4a962cf3d90
2024-11-11T04:35:37,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp/info/844c18e720f74ff38811e4a962cf3d90 as hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/844c18e720f74ff38811e4a962cf3d90
2024-11-11T04:35:37,921 INFO [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/844c18e720f74ff38811e4a962cf3d90, entries=1, sequenceid=5, filesize=5.9 K
2024-11-11T04:35:37,922 INFO [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aa338dfb527d83c5946fe75777997918 in 35ms, sequenceid=5, compaction requested=false
2024-11-11T04:35:37,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for aa338dfb527d83c5946fe75777997918:
2024-11-11T04:35:37,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.
2024-11-11T04:35:37,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-11T04:35:37,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-11T04:35:37,931 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-11T04:35:37,931 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 202 msec
2024-11-11T04:35:37,933 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 213 msec
2024-11-11T04:35:38,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-11T04:35:38,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:35:39,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:35:39,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:35:40,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:35:40,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:35:41,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:35:41,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:35:42,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:35:42,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:35:43,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:35:43,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:35:44,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:35:44,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:35:45,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:35:45,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:35:46,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:35:46,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:35:47,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:35:47,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
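The recurring WARN above is RecoverLeaseFSUtils polling the NameNode (via reflection on isFileClosed) to find out whether the old WAL file has been closed, but doing so through a DFSClient that has already been shut down, hence the "Filesystem closed" cause in every trace. As a rough illustration of that polling pattern only, not the actual HBase implementation, the same check can be made against a still-open DistributedFileSystem handle; the filesystem URI is taken from the log, while the WAL path placeholder and the retry interval are assumptions:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoveryProbe {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (DistributedFileSystem dfs =
                 (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:41327"), conf)) {
          // Placeholder path; the real WAL file names appear in the log entries above.
          Path wal = new Path("/hbase/WALs/example-regionserver/example-wal");
          boolean recovered = dfs.recoverLease(wal);   // ask the NameNode to start lease recovery
          while (!recovered && !dfs.isFileClosed(wal)) { // poll until the file is reported closed
            Thread.sleep(1000L);                        // retry interval is an assumption
            recovered = dfs.recoverLease(wal);
          }
        }
      }
    }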
2024-11-11T04:35:47,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-11T04:35:47,816 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-11T04:35:47,819 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T04:35:47,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T04:35:47,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-11T04:35:47,821 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-11T04:35:47,822 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-11T04:35:47,823 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-11T04:35:47,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34791 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-11T04:35:47,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.
2024-11-11T04:35:47,977 INFO [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing aa338dfb527d83c5946fe75777997918 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-11T04:35:47,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp/info/7ad1272edf544fb8b3c2cb2edbde4e96 is 1080, key is row0002/info:/1731299747817/Put/seqid=0
2024-11-11T04:35:47,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741838_1014 (size=6033)
2024-11-11T04:35:47,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741838_1014 (size=6033)
2024-11-11T04:35:47,989 INFO [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp/info/7ad1272edf544fb8b3c2cb2edbde4e96
2024-11-11T04:35:47,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp/info/7ad1272edf544fb8b3c2cb2edbde4e96 as hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/7ad1272edf544fb8b3c2cb2edbde4e96
2024-11-11T04:35:48,001 INFO [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/7ad1272edf544fb8b3c2cb2edbde4e96, entries=1, sequenceid=9, filesize=5.9 K
2024-11-11T04:35:48,002 INFO [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aa338dfb527d83c5946fe75777997918 in 25ms, sequenceid=9, compaction requested=false
2024-11-11T04:35:48,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for aa338dfb527d83c5946fe75777997918:
2024-11-11T04:35:48,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.
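The "Committing ... as ..." step above promotes the freshly flushed HFile from the region's .tmp directory into the info column-family directory. Read in terms of the generic Hadoop FileSystem API this amounts to a rename on HDFS; a minimal sketch under that reading (the actual HBase code goes through HRegionFileSystem rather than calling rename directly, and the paths are copied from the log):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitFlushedFile {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:42763"), conf);
        // Region directory as it appears in the log.
        Path region = new Path("/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/"
            + "TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918");
        Path tmpFile = new Path(region, ".tmp/info/7ad1272edf544fb8b3c2cb2edbde4e96");
        Path dest = new Path(region, "info/7ad1272edf544fb8b3c2cb2edbde4e96");
        // On HDFS a rename is a NameNode metadata operation, so moving the flushed
        // file into place is cheap; the data blocks are not rewritten.
        if (!fs.rename(tmpFile, dest)) {
          throw new java.io.IOException("Failed to commit " + tmpFile + " to " + dest);
        }
      }
    }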
2024-11-11T04:35:48,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-11T04:35:48,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-11T04:35:48,006 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-11T04:35:48,006 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec
2024-11-11T04:35:48,009 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 188 msec
2024-11-11T04:35:48,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:35:48,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:35:49,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:35:49,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:35:50,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:35:50,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:35:51,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-11T04:35:51,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:52,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:52,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 after 68044ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T04:35:52,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:52,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta after 68030ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:35:53,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:35:53,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:54,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:54,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:35:55,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:55,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:56,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:35:56,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:56,670 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4, see HBASE-27595 for details. 2024-11-11T04:35:57,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:57,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:35:57,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-11T04:35:57,925 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-11T04:35:57,927 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C34791%2C1731299726729.1731299757927 2024-11-11T04:35:57,941 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:57,941 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:57,941 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:57,942 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:57,942 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:35:57,942 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/WALs/a7bef91497aa,34791,1731299726729/a7bef91497aa%2C34791%2C1731299726729.1731299727120 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/WALs/a7bef91497aa,34791,1731299726729/a7bef91497aa%2C34791%2C1731299726729.1731299757927 2024-11-11T04:35:57,944 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46703:46703),(127.0.0.1/127.0.0.1:41459:41459)] 2024-11-11T04:35:57,944 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/WALs/a7bef91497aa,34791,1731299726729/a7bef91497aa%2C34791%2C1731299726729.1731299727120 is not closed yet, will try archiving it next time 2024-11-11T04:35:57,945 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T04:35:57,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741833_1009 (size=5546) 2024-11-11T04:35:57,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741833_1009 (size=5546) 2024-11-11T04:35:57,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T04:35:57,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-11T04:35:57,948 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-11T04:35:57,949 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T04:35:57,949 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T04:35:58,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34791 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-11T04:35:58,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918. 2024-11-11T04:35:58,103 INFO [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing aa338dfb527d83c5946fe75777997918 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-11T04:35:58,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp/info/2f77be844dc64b839102e7c3f7739afe is 1080, key is row0003/info:/1731299757926/Put/seqid=0 2024-11-11T04:35:58,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741840_1016 (size=6033) 2024-11-11T04:35:58,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741840_1016 (size=6033) 2024-11-11T04:35:58,113 INFO [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp/info/2f77be844dc64b839102e7c3f7739afe 2024-11-11T04:35:58,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp/info/2f77be844dc64b839102e7c3f7739afe as hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/2f77be844dc64b839102e7c3f7739afe 2024-11-11T04:35:58,123 INFO [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/2f77be844dc64b839102e7c3f7739afe, entries=1, sequenceid=13, filesize=5.9 K 2024-11-11T04:35:58,124 INFO [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aa338dfb527d83c5946fe75777997918 in 21ms, sequenceid=13, compaction requested=true 2024-11-11T04:35:58,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for aa338dfb527d83c5946fe75777997918: 2024-11-11T04:35:58,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918. 2024-11-11T04:35:58,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-11T04:35:58,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-11T04:35:58,129 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-11T04:35:58,129 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-11-11T04:35:58,131 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec 2024-11-11T04:35:58,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-11T04:35:58,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-11T04:35:59,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:35:59,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:36:00,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:36:00,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:36:01,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:36:01,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:36:02,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:36:02,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:36:03,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:36:03,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:36:04,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:36:04,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:36:05,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:36:05,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:36:06,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:36:06,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
2024-11-11T04:36:07,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
2024-11-11T04:36:07,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
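Note: the retry WARNs above (one pair per second, one for each old WAL) come from a reflective isFileClosed(Path) probe that keeps failing because the DFSClient for hdfs://localhost:41327 is already closed, so each probe surfaces as an InvocationTargetException wrapping IOException: Filesystem closed. A minimal sketch of that reflective probe pattern in Java, shown for illustration only (it is not the RecoverLeaseFSUtils source; the class and method names below are placeholders):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class IsFileClosedProbe {
        // Look up isFileClosed(Path) reflectively (it only exists on DistributedFileSystem)
        // and invoke it. When the underlying DFSClient has been closed, the call throws
        // IOException("Filesystem closed"), which arrives here wrapped in an
        // InvocationTargetException, matching the "Failed invocation" WARN entries above.
        static boolean probe(FileSystem fs, Path walFile) {
            try {
                Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
                return (Boolean) isFileClosed.invoke(fs, walFile);
            } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
                return false; // caller treats this as "not closed yet" and retries after a delay
            }
        }
    }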
2024-11-11T04:36:08,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-11T04:36:08,034 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-11T04:36:08,035 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-11T04:36:08,036 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-11T04:36:08,036 DEBUG [Time-limited test {}] regionserver.HStore(1541): aa338dfb527d83c5946fe75777997918/info is initiating minor compaction (all files)
2024-11-11T04:36:08,036 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-11T04:36:08,036 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-11T04:36:08,036 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of aa338dfb527d83c5946fe75777997918/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.
2024-11-11T04:36:08,036 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/844c18e720f74ff38811e4a962cf3d90, hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/7ad1272edf544fb8b3c2cb2edbde4e96, hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/2f77be844dc64b839102e7c3f7739afe] into tmpdir=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp, totalSize=17.7 K
2024-11-11T04:36:08,037 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 844c18e720f74ff38811e4a962cf3d90, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731299737712
2024-11-11T04:36:08,037 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 7ad1272edf544fb8b3c2cb2edbde4e96, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731299747817
2024-11-11T04:36:08,038 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 2f77be844dc64b839102e7c3f7739afe, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731299757926
2024-11-11T04:36:08,049 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): aa338dfb527d83c5946fe75777997918#info#compaction#46 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-11T04:36:08,049 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp/info/6dd2165627474cbcb85382adfdfe4eba is 1080, key is row0001/info:/1731299737712/Put/seqid=0
2024-11-11T04:36:08,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741841_1017 (size=8296)
2024-11-11T04:36:08,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741841_1017 (size=8296)
2024-11-11T04:36:08,059 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp/info/6dd2165627474cbcb85382adfdfe4eba as hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/6dd2165627474cbcb85382adfdfe4eba
2024-11-11T04:36:08,065 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in aa338dfb527d83c5946fe75777997918/info of aa338dfb527d83c5946fe75777997918 into 6dd2165627474cbcb85382adfdfe4eba(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-11T04:36:08,065 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for aa338dfb527d83c5946fe75777997918:
2024-11-11T04:36:08,068 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C34791%2C1731299726729.1731299768068
2024-11-11T04:36:08,073 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T04:36:08,073 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T04:36:08,073 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T04:36:08,073 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T04:36:08,073 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T04:36:08,073 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/WALs/a7bef91497aa,34791,1731299726729/a7bef91497aa%2C34791%2C1731299726729.1731299757927 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/WALs/a7bef91497aa,34791,1731299726729/a7bef91497aa%2C34791%2C1731299726729.1731299768068
2024-11-11T04:36:08,074 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46703:46703),(127.0.0.1/127.0.0.1:41459:41459)]
2024-11-11T04:36:08,074 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/WALs/a7bef91497aa,34791,1731299726729/a7bef91497aa%2C34791%2C1731299726729.1731299757927 is not closed yet, will try archiving it next time
2024-11-11T04:36:08,075 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/WALs/a7bef91497aa,34791,1731299726729/a7bef91497aa%2C34791%2C1731299726729.1731299727120 to hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/oldWALs/a7bef91497aa%2C34791%2C1731299726729.1731299727120
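The roll logged at 04:36:08,073 is driven by the test thread itself; an equivalent roll of a region server's WAL can also be requested through the public Admin API. A hedged sketch of that call, where the connection setup is assumed and the server name is taken from the WAL directory shown above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    final class RollWalExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // host,port,startcode as they appear in the WAL path above; adjust for a real cluster
                ServerName rs = ServerName.valueOf("a7bef91497aa,34791,1731299726729");
                admin.rollWALWriter(rs); // asks the region server to close its current WAL and start a new one
            }
        }
    }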
2024-11-11T04:36:08,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741839_1015 (size=2520)
2024-11-11T04:36:08,076 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T04:36:08,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741839_1015 (size=2520)
2024-11-11T04:36:08,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T04:36:08,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-11T04:36:08,078 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-11T04:36:08,079 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-11T04:36:08,079 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-11T04:36:08,232 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34791 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-11T04:36:08,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.
2024-11-11T04:36:08,232 INFO [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing aa338dfb527d83c5946fe75777997918 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-11T04:36:08,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp/info/ad249dd19a97499cb0272557203613a6 is 1080, key is row0000/info:/1731299768066/Put/seqid=0
2024-11-11T04:36:08,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741843_1019 (size=6033)
2024-11-11T04:36:08,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741843_1019 (size=6033)
2024-11-11T04:36:08,242 INFO [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp/info/ad249dd19a97499cb0272557203613a6
2024-11-11T04:36:08,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp/info/ad249dd19a97499cb0272557203613a6 as hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/ad249dd19a97499cb0272557203613a6
2024-11-11T04:36:08,252 INFO [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/ad249dd19a97499cb0272557203613a6, entries=1, sequenceid=18, filesize=5.9 K
2024-11-11T04:36:08,253 INFO [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aa338dfb527d83c5946fe75777997918 in 21ms, sequenceid=18, compaction requested=false
2024-11-11T04:36:08,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for aa338dfb527d83c5946fe75777997918:
2024-11-11T04:36:08,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.
2024-11-11T04:36:08,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-11-11T04:36:08,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-11-11T04:36:08,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-11T04:36:08,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 176 msec
2024-11-11T04:36:08,260 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 183 msec
2024-11-11T04:36:08,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
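The flush traced above (HMaster$22 flush request, FlushTableProcedure pid=13 with FlushRegionProcedure pid=14) is the server side of an admin flush call against the test table. A minimal client-side sketch with the blocking Admin API, assuming a reachable cluster configuration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    final class FlushTableExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Flush the same table as in the log; the call returns once the flush procedure completes.
                admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
            }
        }
    }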
11 more 2024-11-11T04:36:08,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-11T04:36:08,490 INFO [master/a7bef91497aa:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-11T04:36:08,490 INFO [master/a7bef91497aa:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
[The two RecoverLeaseFSUtils WARN entries above, each with the same InvocationTargetException caused by java.io.IOException: Filesystem closed, repeat for both WAL files at roughly one-second intervals from 2024-11-11T04:36:09,421 through 2024-11-11T04:36:17,427.]
2024-11-11T04:36:12,997 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region aa338dfb527d83c5946fe75777997918, had cached 0 bytes from a total of 14329
2024-11-11T04:36:18,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46241 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-11T04:36:18,116 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-11T04:36:18,118 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C34791%2C1731299726729.1731299778118 2024-11-11T04:36:18,124 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,124 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,125 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,125 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,125 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,125 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/WALs/a7bef91497aa,34791,1731299726729/a7bef91497aa%2C34791%2C1731299726729.1731299768068 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/WALs/a7bef91497aa,34791,1731299726729/a7bef91497aa%2C34791%2C1731299726729.1731299778118 2024-11-11T04:36:18,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741842_1018 (size=2026) 2024-11-11T04:36:18,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741842_1018 (size=2026) 2024-11-11T04:36:18,127 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46703:46703),(127.0.0.1/127.0.0.1:41459:41459)] 2024-11-11T04:36:18,127 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/WALs/a7bef91497aa,34791,1731299726729/a7bef91497aa%2C34791%2C1731299726729.1731299768068 is not closed yet, will try archiving it next time 2024-11-11T04:36:18,127 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/WALs/a7bef91497aa,34791,1731299726729/a7bef91497aa%2C34791%2C1731299726729.1731299757927 to hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/oldWALs/a7bef91497aa%2C34791%2C1731299726729.1731299757927 2024-11-11T04:36:18,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T04:36:18,127 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-11T04:36:18,127 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:36:18,127 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:36:18,127 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:36:18,127 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-11T04:36:18,127 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T04:36:18,127 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1013903499, stopped=false 2024-11-11T04:36:18,128 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a7bef91497aa,46241,1731299726686 2024-11-11T04:36:18,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:36:18,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:36:18,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:36:18,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:36:18,129 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T04:36:18,130 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T04:36:18,130 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:36:18,130 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:36:18,130 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a7bef91497aa,34791,1731299726729' ***** 2024-11-11T04:36:18,130 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T04:36:18,130 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T04:36:18,131 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T04:36:18,131 INFO [RS:0;a7bef91497aa:34791 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T04:36:18,131 INFO [RS:0;a7bef91497aa:34791 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T04:36:18,131 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:36:18,131 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.HRegionServer(3091): Received CLOSE for aa338dfb527d83c5946fe75777997918 2024-11-11T04:36:18,131 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:36:18,131 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.HRegionServer(959): stopping server a7bef91497aa,34791,1731299726729 2024-11-11T04:36:18,131 INFO [RS:0;a7bef91497aa:34791 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T04:36:18,131 INFO [RS:0;a7bef91497aa:34791 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a7bef91497aa:34791. 
2024-11-11T04:36:18,131 DEBUG [RS:0;a7bef91497aa:34791 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:36:18,131 DEBUG [RS:0;a7bef91497aa:34791 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:36:18,131 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing aa338dfb527d83c5946fe75777997918, disabling compactions & flushes 2024-11-11T04:36:18,131 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T04:36:18,131 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T04:36:18,131 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T04:36:18,131 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918. 2024-11-11T04:36:18,131 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T04:36:18,132 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918. 2024-11-11T04:36:18,132 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918. after waiting 0 ms 2024-11-11T04:36:18,132 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918. 
2024-11-11T04:36:18,132 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing aa338dfb527d83c5946fe75777997918 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-11T04:36:18,132 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T04:36:18,132 DEBUG [RS:0;a7bef91497aa:34791 {}] regionserver.HRegionServer(1325): Online Regions={aa338dfb527d83c5946fe75777997918=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918., 1588230740=hbase:meta,,1.1588230740} 2024-11-11T04:36:18,132 DEBUG [RS:0;a7bef91497aa:34791 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, aa338dfb527d83c5946fe75777997918 2024-11-11T04:36:18,132 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:36:18,132 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T04:36:18,132 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T04:36:18,132 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:36:18,132 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:36:18,132 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-11T04:36:18,136 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp/info/0e7ae39547094b1eabfbcb234cffd0be is 1080, key is row0001/info:/1731299778117/Put/seqid=0 2024-11-11T04:36:18,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741845_1021 (size=6033) 2024-11-11T04:36:18,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741845_1021 (size=6033) 2024-11-11T04:36:18,141 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp/info/0e7ae39547094b1eabfbcb234cffd0be 2024-11-11T04:36:18,148 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/.tmp/info/0e7ae39547094b1eabfbcb234cffd0be as hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/0e7ae39547094b1eabfbcb234cffd0be 2024-11-11T04:36:18,149 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740/.tmp/info/284dfe25635a432e8bf552bebc77f3d3 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918./info:regioninfo/1731299728011/Put/seqid=0 2024-11-11T04:36:18,153 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/0e7ae39547094b1eabfbcb234cffd0be, entries=1, sequenceid=22, filesize=5.9 K 2024-11-11T04:36:18,154 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aa338dfb527d83c5946fe75777997918 in 22ms, sequenceid=22, compaction requested=true 2024-11-11T04:36:18,157 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/844c18e720f74ff38811e4a962cf3d90, hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/7ad1272edf544fb8b3c2cb2edbde4e96, hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/2f77be844dc64b839102e7c3f7739afe] to archive 2024-11-11T04:36:18,158 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-11T04:36:18,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741846_1022 (size=7308) 2024-11-11T04:36:18,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741846_1022 (size=7308) 2024-11-11T04:36:18,159 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740/.tmp/info/284dfe25635a432e8bf552bebc77f3d3 2024-11-11T04:36:18,160 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/844c18e720f74ff38811e4a962cf3d90 to hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/844c18e720f74ff38811e4a962cf3d90 2024-11-11T04:36:18,162 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/7ad1272edf544fb8b3c2cb2edbde4e96 to hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/7ad1272edf544fb8b3c2cb2edbde4e96 2024-11-11T04:36:18,163 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/2f77be844dc64b839102e7c3f7739afe to hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/info/2f77be844dc64b839102e7c3f7739afe 2024-11-11T04:36:18,163 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=a7bef91497aa:46241 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] 
at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-11T04:36:18,164 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [844c18e720f74ff38811e4a962cf3d90=6033, 7ad1272edf544fb8b3c2cb2edbde4e96=6033, 2f77be844dc64b839102e7c3f7739afe=6033] 2024-11-11T04:36:18,167 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aa338dfb527d83c5946fe75777997918/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-11T04:36:18,168 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918. 2024-11-11T04:36:18,168 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for aa338dfb527d83c5946fe75777997918: Waiting for close lock at 1731299778131Running coprocessor pre-close hooks at 1731299778131Disabling compacts and flushes for region at 1731299778131Disabling writes for close at 1731299778132 (+1 ms)Obtaining lock to block concurrent updates at 1731299778132Preparing flush snapshotting stores in aa338dfb527d83c5946fe75777997918 at 1731299778132Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731299778132Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918. at 1731299778133 (+1 ms)Flushing aa338dfb527d83c5946fe75777997918/info: creating writer at 1731299778133Flushing aa338dfb527d83c5946fe75777997918/info: appending metadata at 1731299778136 (+3 ms)Flushing aa338dfb527d83c5946fe75777997918/info: closing flushed file at 1731299778136Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@72b2d581: reopening flushed file at 1731299778147 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aa338dfb527d83c5946fe75777997918 in 22ms, sequenceid=22, compaction requested=true at 1731299778154 (+7 ms)Writing region close event to WAL at 1731299778164 (+10 ms)Running coprocessor post-close hooks at 1731299778168 (+4 ms)Closed at 1731299778168 2024-11-11T04:36:18,168 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731299727659.aa338dfb527d83c5946fe75777997918. 
2024-11-11T04:36:18,179 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740/.tmp/ns/4ab94af57fd445ba9478a727a9c23fa2 is 43, key is default/ns:d/1731299727542/Put/seqid=0 2024-11-11T04:36:18,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741847_1023 (size=5153) 2024-11-11T04:36:18,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741847_1023 (size=5153) 2024-11-11T04:36:18,184 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740/.tmp/ns/4ab94af57fd445ba9478a727a9c23fa2 2024-11-11T04:36:18,203 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740/.tmp/table/f337441f6d384423910f4d0cbabc62b0 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731299728020/Put/seqid=0 2024-11-11T04:36:18,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741848_1024 (size=5508) 2024-11-11T04:36:18,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741848_1024 (size=5508) 2024-11-11T04:36:18,208 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740/.tmp/table/f337441f6d384423910f4d0cbabc62b0 2024-11-11T04:36:18,214 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740/.tmp/info/284dfe25635a432e8bf552bebc77f3d3 as hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740/info/284dfe25635a432e8bf552bebc77f3d3 2024-11-11T04:36:18,219 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740/info/284dfe25635a432e8bf552bebc77f3d3, entries=10, sequenceid=11, filesize=7.1 K 2024-11-11T04:36:18,219 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740/.tmp/ns/4ab94af57fd445ba9478a727a9c23fa2 as hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740/ns/4ab94af57fd445ba9478a727a9c23fa2 2024-11-11T04:36:18,224 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740/ns/4ab94af57fd445ba9478a727a9c23fa2, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T04:36:18,224 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740/.tmp/table/f337441f6d384423910f4d0cbabc62b0 as hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740/table/f337441f6d384423910f4d0cbabc62b0 2024-11-11T04:36:18,229 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740/table/f337441f6d384423910f4d0cbabc62b0, entries=2, sequenceid=11, filesize=5.4 K 2024-11-11T04:36:18,231 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 98ms, sequenceid=11, compaction requested=false 2024-11-11T04:36:18,235 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T04:36:18,235 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T04:36:18,235 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T04:36:18,235 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731299778132Running coprocessor pre-close hooks at 1731299778132Disabling compacts and flushes for region at 1731299778132Disabling writes for close at 1731299778132Obtaining lock to block concurrent updates at 1731299778132Preparing flush snapshotting stores in 1588230740 at 1731299778132Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731299778132Flushing stores of hbase:meta,,1.1588230740 at 1731299778133 (+1 ms)Flushing 1588230740/info: creating writer at 1731299778133Flushing 1588230740/info: appending metadata at 1731299778149 (+16 ms)Flushing 1588230740/info: closing flushed file at 1731299778149Flushing 1588230740/ns: creating writer at 1731299778165 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731299778179 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731299778179Flushing 1588230740/table: creating writer at 1731299778189 (+10 ms)Flushing 1588230740/table: appending metadata at 1731299778203 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731299778203Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38670320: reopening flushed file at 1731299778213 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14d4e5de: reopening flushed file at 1731299778219 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7dd51636: reopening flushed file at 1731299778224 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 98ms, sequenceid=11, compaction requested=false at 1731299778231 (+7 ms)Writing region close event to WAL at 1731299778232 (+1 ms)Running coprocessor post-close hooks at 1731299778235 (+3 ms)Closed at 1731299778235 2024-11-11T04:36:18,236 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T04:36:18,332 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.HRegionServer(976): stopping server a7bef91497aa,34791,1731299726729; all regions closed. 2024-11-11T04:36:18,332 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,333 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,333 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,333 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,333 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741834_1010 (size=3306) 2024-11-11T04:36:18,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741834_1010 (size=3306) 2024-11-11T04:36:18,337 DEBUG [RS:0;a7bef91497aa:34791 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/oldWALs 2024-11-11T04:36:18,337 INFO [RS:0;a7bef91497aa:34791 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7bef91497aa%2C34791%2C1731299726729.meta:.meta(num 1731299727490) 2024-11-11T04:36:18,337 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,338 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,338 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,338 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,338 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741844_1020 (size=1252) 2024-11-11T04:36:18,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741844_1020 (size=1252) 2024-11-11T04:36:18,342 DEBUG [RS:0;a7bef91497aa:34791 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/oldWALs 2024-11-11T04:36:18,342 INFO [RS:0;a7bef91497aa:34791 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7bef91497aa%2C34791%2C1731299726729:(num 1731299778118) 2024-11-11T04:36:18,342 DEBUG [RS:0;a7bef91497aa:34791 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:36:18,342 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:36:18,343 INFO [RS:0;a7bef91497aa:34791 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T04:36:18,343 INFO [RS:0;a7bef91497aa:34791 {}] hbase.ChoreService(370): Chore service for: regionserver/a7bef91497aa:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T04:36:18,343 INFO [RS:0;a7bef91497aa:34791 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T04:36:18,343 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T04:36:18,343 INFO [RS:0;a7bef91497aa:34791 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34791 2024-11-11T04:36:18,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:36:18,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a7bef91497aa,34791,1731299726729 2024-11-11T04:36:18,345 INFO [RS:0;a7bef91497aa:34791 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T04:36:18,347 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a7bef91497aa,34791,1731299726729] 2024-11-11T04:36:18,348 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a7bef91497aa,34791,1731299726729 already deleted, retry=false 2024-11-11T04:36:18,348 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a7bef91497aa,34791,1731299726729 expired; onlineServers=0 2024-11-11T04:36:18,348 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a7bef91497aa,46241,1731299726686' ***** 2024-11-11T04:36:18,348 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T04:36:18,348 INFO [M:0;a7bef91497aa:46241 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T04:36:18,348 INFO [M:0;a7bef91497aa:46241 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T04:36:18,348 DEBUG [M:0;a7bef91497aa:46241 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T04:36:18,349 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T04:36:18,349 DEBUG [M:0;a7bef91497aa:46241 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T04:36:18,349 DEBUG [master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299726886 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299726886,5,FailOnTimeoutGroup] 2024-11-11T04:36:18,349 DEBUG [master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299726886 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299726886,5,FailOnTimeoutGroup] 2024-11-11T04:36:18,349 INFO [M:0;a7bef91497aa:46241 {}] hbase.ChoreService(370): Chore service for: master/a7bef91497aa:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T04:36:18,349 INFO [M:0;a7bef91497aa:46241 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T04:36:18,349 DEBUG [M:0;a7bef91497aa:46241 {}] master.HMaster(1795): Stopping service threads 2024-11-11T04:36:18,349 INFO [M:0;a7bef91497aa:46241 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T04:36:18,349 INFO [M:0;a7bef91497aa:46241 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T04:36:18,349 INFO [M:0;a7bef91497aa:46241 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T04:36:18,349 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T04:36:18,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T04:36:18,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:36:18,350 DEBUG [M:0;a7bef91497aa:46241 {}] zookeeper.ZKUtil(347): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T04:36:18,350 WARN [M:0;a7bef91497aa:46241 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T04:36:18,351 INFO [M:0;a7bef91497aa:46241 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/.lastflushedseqids 2024-11-11T04:36:18,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741849_1025 (size=130) 2024-11-11T04:36:18,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741849_1025 (size=130) 2024-11-11T04:36:18,356 INFO [M:0;a7bef91497aa:46241 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T04:36:18,356 INFO [M:0;a7bef91497aa:46241 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T04:36:18,356 DEBUG [M:0;a7bef91497aa:46241 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T04:36:18,356 INFO [M:0;a7bef91497aa:46241 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:36:18,356 DEBUG [M:0;a7bef91497aa:46241 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:36:18,356 DEBUG [M:0;a7bef91497aa:46241 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T04:36:18,356 DEBUG [M:0;a7bef91497aa:46241 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:36:18,357 INFO [M:0;a7bef91497aa:46241 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.56 KB heapSize=54.94 KB 2024-11-11T04:36:18,372 DEBUG [M:0;a7bef91497aa:46241 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f0e0efca03394fcf8717373b27918524 is 82, key is hbase:meta,,1/info:regioninfo/1731299727525/Put/seqid=0 2024-11-11T04:36:18,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741850_1026 (size=5672) 2024-11-11T04:36:18,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741850_1026 (size=5672) 2024-11-11T04:36:18,377 INFO [M:0;a7bef91497aa:46241 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f0e0efca03394fcf8717373b27918524 2024-11-11T04:36:18,396 DEBUG [M:0;a7bef91497aa:46241 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c2b5ea2c38594eda91272d69be731a02 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731299728024/Put/seqid=0 2024-11-11T04:36:18,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741851_1027 (size=7819) 2024-11-11T04:36:18,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741851_1027 (size=7819) 2024-11-11T04:36:18,401 INFO [M:0;a7bef91497aa:46241 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.95 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c2b5ea2c38594eda91272d69be731a02 2024-11-11T04:36:18,406 INFO [M:0;a7bef91497aa:46241 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c2b5ea2c38594eda91272d69be731a02 2024-11-11T04:36:18,420 DEBUG [M:0;a7bef91497aa:46241 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d99f5d0785d3456b8f1422c8c7031833 is 69, key is a7bef91497aa,34791,1731299726729/rs:state/1731299726965/Put/seqid=0 2024-11-11T04:36:18,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741852_1028 (size=5156) 2024-11-11T04:36:18,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741852_1028 (size=5156) 2024-11-11T04:36:18,426 INFO [M:0;a7bef91497aa:46241 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d99f5d0785d3456b8f1422c8c7031833 2024-11-11T04:36:18,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:18,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:18,444 DEBUG [M:0;a7bef91497aa:46241 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/98224f2819f64bc4951d463b2b1f55be is 52, key is load_balancer_on/state:d/1731299727655/Put/seqid=0 2024-11-11T04:36:18,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:36:18,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101959ccab90001, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:36:18,447 INFO [RS:0;a7bef91497aa:34791 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T04:36:18,447 INFO [RS:0;a7bef91497aa:34791 {}] regionserver.HRegionServer(1031): Exiting; stopping=a7bef91497aa,34791,1731299726729; zookeeper connection closed. 
2024-11-11T04:36:18,448 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@22063c7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@22063c7 2024-11-11T04:36:18,448 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T04:36:18,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741853_1029 (size=5056) 2024-11-11T04:36:18,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741853_1029 (size=5056) 2024-11-11T04:36:18,449 INFO [M:0;a7bef91497aa:46241 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/98224f2819f64bc4951d463b2b1f55be 2024-11-11T04:36:18,454 DEBUG [M:0;a7bef91497aa:46241 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f0e0efca03394fcf8717373b27918524 as hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f0e0efca03394fcf8717373b27918524 2024-11-11T04:36:18,458 INFO [M:0;a7bef91497aa:46241 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f0e0efca03394fcf8717373b27918524, entries=8, sequenceid=121, filesize=5.5 K 2024-11-11T04:36:18,459 DEBUG [M:0;a7bef91497aa:46241 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c2b5ea2c38594eda91272d69be731a02 as hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c2b5ea2c38594eda91272d69be731a02 2024-11-11T04:36:18,463 INFO [M:0;a7bef91497aa:46241 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c2b5ea2c38594eda91272d69be731a02 2024-11-11T04:36:18,463 INFO [M:0;a7bef91497aa:46241 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c2b5ea2c38594eda91272d69be731a02, entries=14, sequenceid=121, filesize=7.6 K 2024-11-11T04:36:18,464 DEBUG [M:0;a7bef91497aa:46241 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d99f5d0785d3456b8f1422c8c7031833 as hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d99f5d0785d3456b8f1422c8c7031833 2024-11-11T04:36:18,468 INFO [M:0;a7bef91497aa:46241 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d99f5d0785d3456b8f1422c8c7031833, entries=1, sequenceid=121, filesize=5.0 K 2024-11-11T04:36:18,468 DEBUG [M:0;a7bef91497aa:46241 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/98224f2819f64bc4951d463b2b1f55be as hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/98224f2819f64bc4951d463b2b1f55be 2024-11-11T04:36:18,472 INFO [M:0;a7bef91497aa:46241 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42763/user/jenkins/test-data/4e5bfb9b-8f9a-1666-3a74-deec1c82d70d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/98224f2819f64bc4951d463b2b1f55be, entries=1, sequenceid=121, filesize=4.9 K 2024-11-11T04:36:18,473 INFO [M:0;a7bef91497aa:46241 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.56 KB/44602, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 117ms, sequenceid=121, compaction requested=false 2024-11-11T04:36:18,475 INFO [M:0;a7bef91497aa:46241 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:36:18,475 DEBUG [M:0;a7bef91497aa:46241 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731299778356Disabling compacts and flushes for region at 1731299778356Disabling writes for close at 1731299778356Obtaining lock to block concurrent updates at 1731299778357 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731299778357Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44602, getHeapSize=56192, getOffHeapSize=0, getCellsCount=140 at 1731299778357Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731299778358 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731299778358Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731299778372 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731299778372Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731299778382 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731299778396 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731299778396Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731299778406 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731299778420 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731299778420Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731299778430 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731299778443 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731299778443Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42fde40d: reopening flushed file at 1731299778453 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14e9282c: reopening flushed file at 1731299778458 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4fd3efe7: reopening flushed file at 1731299778463 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e99d31: reopening flushed file at 1731299778468 (+5 ms)Finished flush of dataSize ~43.56 KB/44602, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 117ms, sequenceid=121, compaction requested=false at 1731299778473 (+5 ms)Writing region close event to WAL at 1731299778474 (+1 ms)Closed at 1731299778474 2024-11-11T04:36:18,475 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,475 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,475 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,475 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,475 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:36:18,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44287 is added to blk_1073741830_1006 (size=52999) 2024-11-11T04:36:18,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37899 is added to blk_1073741830_1006 (size=52999) 2024-11-11T04:36:18,478 INFO [M:0;a7bef91497aa:46241 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T04:36:18,478 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T04:36:18,478 INFO [M:0;a7bef91497aa:46241 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46241 2024-11-11T04:36:18,478 INFO [M:0;a7bef91497aa:46241 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T04:36:18,580 INFO [M:0;a7bef91497aa:46241 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T04:36:18,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:36:18,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46241-0x101959ccab90000, quorum=127.0.0.1:53725, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:36:18,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@22fd1e24{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:36:18,582 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43498b11{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:36:18,582 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:36:18,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2fa194e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:36:18,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39212263{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/hadoop.log.dir/,STOPPED} 2024-11-11T04:36:18,584 WARN [BP-601493080-172.17.0.2-1731299726058 heartbeating to localhost/127.0.0.1:42763 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:36:18,584 WARN [BP-601493080-172.17.0.2-1731299726058 heartbeating to localhost/127.0.0.1:42763 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-601493080-172.17.0.2-1731299726058 (Datanode Uuid 61e3f1dd-81d1-49a9-a0b8-0b9350008616) service to localhost/127.0.0.1:42763 2024-11-11T04:36:18,584 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T04:36:18,584 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:36:18,585 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/cluster_c999db2f-a4c1-1db4-9a28-027f8575ecbe/data/data3/current/BP-601493080-172.17.0.2-1731299726058 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:36:18,585 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/cluster_c999db2f-a4c1-1db4-9a28-027f8575ecbe/data/data4/current/BP-601493080-172.17.0.2-1731299726058 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:36:18,585 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:36:18,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@8d4c846{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:36:18,587 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1330929b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:36:18,587 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:36:18,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3fa18b90{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:36:18,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ac0122b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/hadoop.log.dir/,STOPPED} 2024-11-11T04:36:18,589 WARN [BP-601493080-172.17.0.2-1731299726058 heartbeating to localhost/127.0.0.1:42763 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:36:18,589 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T04:36:18,589 WARN [BP-601493080-172.17.0.2-1731299726058 heartbeating to localhost/127.0.0.1:42763 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-601493080-172.17.0.2-1731299726058 (Datanode Uuid 184fd366-2b2a-44b5-8958-3f8c8ec3d828) service to localhost/127.0.0.1:42763 2024-11-11T04:36:18,589 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:36:18,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/cluster_c999db2f-a4c1-1db4-9a28-027f8575ecbe/data/data1/current/BP-601493080-172.17.0.2-1731299726058 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:36:18,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/cluster_c999db2f-a4c1-1db4-9a28-027f8575ecbe/data/data2/current/BP-601493080-172.17.0.2-1731299726058 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:36:18,590 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:36:18,596 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c464107{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T04:36:18,596 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@62558ec9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:36:18,596 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:36:18,596 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@232381c8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:36:18,596 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79c156a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/hadoop.log.dir/,STOPPED} 2024-11-11T04:36:18,602 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T04:36:18,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T04:36:18,626 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42763 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42763 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:42763 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42763 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42763 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42763 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42763 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42763 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/a7bef91497aa:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=4 (was 9), ProcessCount=11 (was 11), AvailableMemoryMB=6585 (was 6630) 2024-11-11T04:36:18,634 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=4, ProcessCount=11, AvailableMemoryMB=6585 2024-11-11T04:36:18,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T04:36:18,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/hadoop.log.dir so I do NOT create it in target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a 2024-11-11T04:36:18,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/08105350-1506-3bfb-d3b2-39b6807a19d8/hadoop.tmp.dir so I do NOT create it in target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a 2024-11-11T04:36:18,634 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/cluster_052c37d9-502d-aa7a-5c87-77b2a5202b86, deleteOnExit=true 2024-11-11T04:36:18,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T04:36:18,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/test.cache.data in system properties and HBase conf 2024-11-11T04:36:18,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T04:36:18,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/hadoop.log.dir in system properties and HBase conf 2024-11-11T04:36:18,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T04:36:18,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T04:36:18,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T04:36:18,634 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-11T04:36:18,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T04:36:18,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T04:36:18,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T04:36:18,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T04:36:18,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T04:36:18,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T04:36:18,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T04:36:18,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T04:36:18,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T04:36:18,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/nfs.dump.dir in system properties and HBase conf 2024-11-11T04:36:18,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/java.io.tmpdir in system properties and HBase conf 2024-11-11T04:36:18,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T04:36:18,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T04:36:18,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T04:36:18,648 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T04:36:18,703 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:36:18,706 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:36:18,707 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:36:18,707 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:36:18,707 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T04:36:18,708 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:36:18,708 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@52d230c9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:36:18,708 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fda4535{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:36:18,819 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2489695e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/java.io.tmpdir/jetty-localhost-45269-hadoop-hdfs-3_4_1-tests_jar-_-any-4993642958105850228/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T04:36:18,820 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@25570184{HTTP/1.1, (http/1.1)}{localhost:45269} 2024-11-11T04:36:18,820 INFO [Time-limited test {}] server.Server(415): Started @236622ms 2024-11-11T04:36:18,850 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T04:36:18,927 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:36:18,929 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:36:18,930 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:36:18,930 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:36:18,930 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:36:18,932 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@201bdbf9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:36:18,932 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f8f17a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:36:18,981 INFO [regionserver/a7bef91497aa:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:36:19,056 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a67ff9c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/java.io.tmpdir/jetty-localhost-42625-hadoop-hdfs-3_4_1-tests_jar-_-any-12652755775359052079/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:36:19,057 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3bd9a438{HTTP/1.1, (http/1.1)}{localhost:42625} 2024-11-11T04:36:19,057 INFO [Time-limited test {}] server.Server(415): Started @236859ms 2024-11-11T04:36:19,058 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:36:19,085 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:36:19,087 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:36:19,088 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:36:19,088 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:36:19,088 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:36:19,089 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16ccf5f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:36:19,089 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2735da07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:36:19,145 WARN [Thread-1963 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/cluster_052c37d9-502d-aa7a-5c87-77b2a5202b86/data/data2/current/BP-1693491921-172.17.0.2-1731299778654/current, will proceed with Du for space computation calculation, 2024-11-11T04:36:19,145 WARN [Thread-1962 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/cluster_052c37d9-502d-aa7a-5c87-77b2a5202b86/data/data1/current/BP-1693491921-172.17.0.2-1731299778654/current, will proceed with Du for space computation calculation, 2024-11-11T04:36:19,161 WARN [Thread-1941 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:36:19,163 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xedd2709db5338f92 with lease ID 0x8310355e40ac59b3: Processing first storage report for DS-4de899bc-6d6a-415a-9711-4f3c128b4a2f from datanode DatanodeRegistration(127.0.0.1:46475, datanodeUuid=b545a042-33f7-456b-bd71-2763fff6b029, infoPort=45445, infoSecurePort=0, ipcPort=42299, storageInfo=lv=-57;cid=testClusterID;nsid=2120647967;c=1731299778654) 2024-11-11T04:36:19,163 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xedd2709db5338f92 with lease ID 0x8310355e40ac59b3: from storage DS-4de899bc-6d6a-415a-9711-4f3c128b4a2f node DatanodeRegistration(127.0.0.1:46475, datanodeUuid=b545a042-33f7-456b-bd71-2763fff6b029, infoPort=45445, infoSecurePort=0, ipcPort=42299, storageInfo=lv=-57;cid=testClusterID;nsid=2120647967;c=1731299778654), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:36:19,163 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xedd2709db5338f92 with lease ID 0x8310355e40ac59b3: Processing first storage report for DS-fc71d413-f8b6-48f9-be99-89eb2d569ffe from datanode DatanodeRegistration(127.0.0.1:46475, datanodeUuid=b545a042-33f7-456b-bd71-2763fff6b029, infoPort=45445, infoSecurePort=0, ipcPort=42299, storageInfo=lv=-57;cid=testClusterID;nsid=2120647967;c=1731299778654) 2024-11-11T04:36:19,163 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xedd2709db5338f92 with lease ID 0x8310355e40ac59b3: from storage DS-fc71d413-f8b6-48f9-be99-89eb2d569ffe node DatanodeRegistration(127.0.0.1:46475, datanodeUuid=b545a042-33f7-456b-bd71-2763fff6b029, infoPort=45445, infoSecurePort=0, ipcPort=42299, storageInfo=lv=-57;cid=testClusterID;nsid=2120647967;c=1731299778654), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:36:19,201 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f932cc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/java.io.tmpdir/jetty-localhost-34677-hadoop-hdfs-3_4_1-tests_jar-_-any-14170386315013805618/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:36:19,201 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ff7780b{HTTP/1.1, (http/1.1)}{localhost:34677} 2024-11-11T04:36:19,201 INFO [Time-limited test {}] server.Server(415): Started @237003ms 2024-11-11T04:36:19,202 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
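
The restart recorded above ("Starting up minicluster with option: StartMiniClusterOption{numMasters=1, ..., numRegionServers=1, ..., numDataNodes=2, ..., numZkServers=1, ...}") corresponds roughly to the sketch below. HBaseTestingUtil and StartMiniClusterOption are the types named in the log, but the builder method names are assumed to mirror the printed field names, so treat them as unverified:

    // Hedged sketch of the cluster shape logged above; the builder method names
    // are assumptions based on the StartMiniClusterOption toString() output.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class StartOptionSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)       // one HMaster
            .numRegionServers(1) // one region server
            .numDataNodes(2)     // two HDFS datanodes, as in the log
            .numZkServers(1)     // one MiniZooKeeperCluster node
            .build();
        // startMiniCluster stages hadoop.log.dir, hadoop.tmp.dir and the other
        // per-test directories shown above before launching DFS, ZK and HBase.
        util.startMiniCluster(option);
        try {
          // test logic would run here against util.getConnection()
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
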
2024-11-11T04:36:19,291 WARN [Thread-1988 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/cluster_052c37d9-502d-aa7a-5c87-77b2a5202b86/data/data3/current/BP-1693491921-172.17.0.2-1731299778654/current, will proceed with Du for space computation calculation, 2024-11-11T04:36:19,291 WARN [Thread-1989 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/cluster_052c37d9-502d-aa7a-5c87-77b2a5202b86/data/data4/current/BP-1693491921-172.17.0.2-1731299778654/current, will proceed with Du for space computation calculation, 2024-11-11T04:36:19,306 WARN [Thread-1977 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T04:36:19,309 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x995519d7c8940af8 with lease ID 0x8310355e40ac59b4: Processing first storage report for DS-e1add078-291f-4b9b-a67c-27ea3f0573f9 from datanode DatanodeRegistration(127.0.0.1:38995, datanodeUuid=805913f2-e027-42e2-b57f-203e3a22905f, infoPort=36417, infoSecurePort=0, ipcPort=33851, storageInfo=lv=-57;cid=testClusterID;nsid=2120647967;c=1731299778654) 2024-11-11T04:36:19,309 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x995519d7c8940af8 with lease ID 0x8310355e40ac59b4: from storage DS-e1add078-291f-4b9b-a67c-27ea3f0573f9 node DatanodeRegistration(127.0.0.1:38995, datanodeUuid=805913f2-e027-42e2-b57f-203e3a22905f, infoPort=36417, infoSecurePort=0, ipcPort=33851, storageInfo=lv=-57;cid=testClusterID;nsid=2120647967;c=1731299778654), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:36:19,309 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x995519d7c8940af8 with lease ID 0x8310355e40ac59b4: Processing first storage report for DS-2457fa03-eb0c-4f54-8b8b-6c29f0796f94 from datanode DatanodeRegistration(127.0.0.1:38995, datanodeUuid=805913f2-e027-42e2-b57f-203e3a22905f, infoPort=36417, infoSecurePort=0, ipcPort=33851, storageInfo=lv=-57;cid=testClusterID;nsid=2120647967;c=1731299778654) 2024-11-11T04:36:19,309 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x995519d7c8940af8 with lease ID 0x8310355e40ac59b4: from storage DS-2457fa03-eb0c-4f54-8b8b-6c29f0796f94 node DatanodeRegistration(127.0.0.1:38995, datanodeUuid=805913f2-e027-42e2-b57f-203e3a22905f, infoPort=36417, infoSecurePort=0, ipcPort=33851, storageInfo=lv=-57;cid=testClusterID;nsid=2120647967;c=1731299778654), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:36:19,322 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a 2024-11-11T04:36:19,325 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/cluster_052c37d9-502d-aa7a-5c87-77b2a5202b86/zookeeper_0, clientPort=55813, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/cluster_052c37d9-502d-aa7a-5c87-77b2a5202b86/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/cluster_052c37d9-502d-aa7a-5c87-77b2a5202b86/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T04:36:19,326 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55813 2024-11-11T04:36:19,326 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:36:19,328 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:36:19,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741825_1001 (size=7) 2024-11-11T04:36:19,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741825_1001 (size=7) 2024-11-11T04:36:19,337 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf with version=8 2024-11-11T04:36:19,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/hbase-staging 2024-11-11T04:36:19,338 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a7bef91497aa:0 server-side Connection retries=45 2024-11-11T04:36:19,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:36:19,339 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:36:19,339 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:36:19,339 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:36:19,339 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:36:19,339 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T04:36:19,339 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:36:19,339 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43893 2024-11-11T04:36:19,341 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43893 connecting to ZooKeeper ensemble=127.0.0.1:55813 2024-11-11T04:36:19,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:438930x0, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:36:19,347 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43893-0x101959d98640000 connected 2024-11-11T04:36:19,361 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:36:19,362 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:36:19,364 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:36:19,364 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf, hbase.cluster.distributed=false 2024-11-11T04:36:19,365 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:36:19,366 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43893 2024-11-11T04:36:19,366 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43893 2024-11-11T04:36:19,366 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43893 2024-11-11T04:36:19,366 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43893 2024-11-11T04:36:19,367 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43893 2024-11-11T04:36:19,381 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a7bef91497aa:0 server-side Connection retries=45 2024-11-11T04:36:19,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:36:19,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:36:19,381 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:36:19,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:36:19,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:36:19,381 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T04:36:19,381 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:36:19,382 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33973 2024-11-11T04:36:19,383 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33973 connecting to ZooKeeper ensemble=127.0.0.1:55813 2024-11-11T04:36:19,384 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:36:19,385 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:36:19,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:339730x0, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:36:19,389 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:339730x0, quorum=127.0.0.1:55813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:36:19,389 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33973-0x101959d98640001 connected 2024-11-11T04:36:19,389 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T04:36:19,390 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T04:36:19,390 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T04:36:19,391 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:36:19,392 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33973 2024-11-11T04:36:19,392 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33973 2024-11-11T04:36:19,392 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33973 2024-11-11T04:36:19,395 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33973 2024-11-11T04:36:19,396 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33973 2024-11-11T04:36:19,407 
DEBUG [M:0;a7bef91497aa:43893 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a7bef91497aa:43893 2024-11-11T04:36:19,407 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a7bef91497aa,43893,1731299779338 2024-11-11T04:36:19,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:36:19,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:36:19,409 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a7bef91497aa,43893,1731299779338 2024-11-11T04:36:19,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T04:36:19,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:36:19,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:36:19,411 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T04:36:19,411 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a7bef91497aa,43893,1731299779338 from backup master directory 2024-11-11T04:36:19,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a7bef91497aa,43893,1731299779338 2024-11-11T04:36:19,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:36:19,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:36:19,413 WARN [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
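
The ZooKeeper traffic above (watchers set on /hbase/running, /hbase/acl and /hbase/master, a backup-masters znode created and then deleted) is the master election handshake against the MiniZooKeeperCluster on client port 55813. A small sketch of inspecting those znodes with the stock ZooKeeper client, assuming the quorum address and base znode shown in the log; this is not HBase's internal ZKWatcher API:

    // Hedged sketch: read the election znodes directly with the plain
    // org.apache.zookeeper client. Quorum and paths come from the log above.
    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeInspect {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55813", 30000, event -> { });
        try {
          // /hbase/master holds the active master's address (protobuf-encoded);
          // /hbase/backup-masters lists masters still waiting, which is why the
          // log shows that znode appearing and then being deleted on activation.
          byte[] active = zk.getData("/hbase/master", false, null);
          List<String> backups = zk.getChildren("/hbase/backup-masters", false);
          System.out.println("active master znode: " + active.length + " bytes");
          System.out.println("backup masters: " + backups);
        } finally {
          zk.close();
        }
      }
    }
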
2024-11-11T04:36:19,413 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a7bef91497aa,43893,1731299779338
2024-11-11T04:36:19,417 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/hbase.id] with ID: 71312df2-ef72-4505-a1c9-d7ef5e55a882
2024-11-11T04:36:19,417 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/.tmp/hbase.id
2024-11-11T04:36:19,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741826_1002 (size=42)
2024-11-11T04:36:19,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741826_1002 (size=42)
2024-11-11T04:36:19,423 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/.tmp/hbase.id]:[hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/hbase.id]
2024-11-11T04:36:19,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-11T04:36:19,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-11T04:36:19,436 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T04:36:19,436 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-11T04:36:19,437 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
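The two WARN stack traces above come from RecoverLeaseFSUtils reflectively probing DFSClient.isFileClosed() against a DistributedFileSystem whose client is already shut down ("Filesystem closed"); the WAL paths point at port 41327 and an earlier timestamp range, so this appears to be leftover close-WAL cleanup from a previously torn-down mini-cluster rather than a problem with the cluster being started here. Purely to illustrate the underlying public HDFS calls (not HBase's actual RecoverLeaseFSUtils logic), a lease-recovery loop might look like the sketch below; the class name, the file path /user/jenkins/example-wal, and the 60-second budget are assumptions for the example.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative lease-recovery loop over the public HDFS client API; a sketch only,
// not the RecoverLeaseFSUtils implementation referenced in the stack trace above.
public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41327"), conf);
    DistributedFileSystem dfs = (DistributedFileSystem) fs; // lease recovery is HDFS-specific
    Path wal = new Path("/user/jenkins/example-wal");       // hypothetical file, not a path from the log
    long deadline = System.currentTimeMillis() + 60_000;    // arbitrary retry budget
    // recoverLease() returns true once the lease has been released and the file is closed;
    // isFileClosed() is the same probe that DFSClient.isFileClosed() serves in the trace above.
    while (!dfs.recoverLease(wal) && !dfs.isFileClosed(wal)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("gave up waiting for lease recovery on " + wal);
      }
      Thread.sleep(1_000); // back off before retrying
    }
    dfs.close();
  }
}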
2024-11-11T04:36:19,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:36:19,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:36:19,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741827_1003 (size=196) 2024-11-11T04:36:19,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741827_1003 (size=196) 2024-11-11T04:36:19,447 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T04:36:19,447 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T04:36:19,448 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:36:19,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741828_1004 (size=1189) 2024-11-11T04:36:19,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741828_1004 (size=1189) 2024-11-11T04:36:19,458 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store 2024-11-11T04:36:19,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:36:19,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:36:19,465 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:36:19,465 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T04:36:19,465 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:36:19,465 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:36:19,465 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T04:36:19,465 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:36:19,465 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
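The descriptor dumped in the two records above belongs to the master's internal 'master:store' region, which HBase creates on its own; its attributes (VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY, BLOCKSIZE) map directly onto the public client API, so as an illustration only, a user table with a comparable 'info' family could be declared roughly as below. The table name example_store, the connection handling, and the idea of creating such a table at all are assumptions for the example, not something this test does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: a user table whose 'info' family mirrors the attributes logged for master:store.
public class TableDescriptorSketch {
  public static void main(String[] args) throws Exception {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBlocksize(8192)                                    // BLOCKSIZE => '8192 B (8KB)'
        .build();

    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_store"))        // hypothetical table name
        .setColumnFamily(info)
        .build();

    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(table);
    }
  }
}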
2024-11-11T04:36:19,465 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731299779465Disabling compacts and flushes for region at 1731299779465Disabling writes for close at 1731299779465Writing region close event to WAL at 1731299779465Closed at 1731299779465 2024-11-11T04:36:19,466 WARN [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/.initializing 2024-11-11T04:36:19,466 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/WALs/a7bef91497aa,43893,1731299779338 2024-11-11T04:36:19,468 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C43893%2C1731299779338, suffix=, logDir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/WALs/a7bef91497aa,43893,1731299779338, archiveDir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/oldWALs, maxLogs=10 2024-11-11T04:36:19,469 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C43893%2C1731299779338.1731299779469 2024-11-11T04:36:19,473 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/WALs/a7bef91497aa,43893,1731299779338/a7bef91497aa%2C43893%2C1731299779338.1731299779469 2024-11-11T04:36:19,474 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36417:36417),(127.0.0.1/127.0.0.1:45445:45445)] 2024-11-11T04:36:19,474 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:36:19,474 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:36:19,474 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:36:19,475 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:36:19,476 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:36:19,477 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T04:36:19,477 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:19,477 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:36:19,477 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:36:19,478 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T04:36:19,478 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:19,479 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:36:19,479 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:36:19,479 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T04:36:19,479 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:19,480 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:36:19,480 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:36:19,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T04:36:19,481 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:19,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:36:19,481 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:36:19,482 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:36:19,482 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:36:19,483 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:36:19,483 DEBUG [master/a7bef91497aa:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:36:19,484 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T04:36:19,485 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:36:19,486 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:36:19,487 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=749736, jitterRate=-0.046662598848342896}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T04:36:19,487 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731299779475Initializing all the Stores at 1731299779475Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299779475Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299779475Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299779475Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299779475Cleaning up temporary data from old regions at 1731299779483 (+8 ms)Region opened successfully at 1731299779487 (+4 ms) 2024-11-11T04:36:19,487 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T04:36:19,490 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77fb321c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7bef91497aa/172.17.0.2:0 2024-11-11T04:36:19,491 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T04:36:19,491 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T04:36:19,491 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T04:36:19,491 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T04:36:19,491 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T04:36:19,492 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T04:36:19,492 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T04:36:19,494 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T04:36:19,494 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T04:36:19,496 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T04:36:19,496 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T04:36:19,497 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T04:36:19,498 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T04:36:19,498 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T04:36:19,499 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T04:36:19,500 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T04:36:19,501 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T04:36:19,502 DEBUG 
[master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T04:36:19,504 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T04:36:19,505 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T04:36:19,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:36:19,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:36:19,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:36:19,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:36:19,507 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a7bef91497aa,43893,1731299779338, sessionid=0x101959d98640000, setting cluster-up flag (Was=false) 2024-11-11T04:36:19,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:36:19,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:36:19,515 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T04:36:19,516 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7bef91497aa,43893,1731299779338 2024-11-11T04:36:19,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:36:19,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:36:19,524 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T04:36:19,525 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7bef91497aa,43893,1731299779338 2024-11-11T04:36:19,526 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T04:36:19,528 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T04:36:19,528 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T04:36:19,528 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T04:36:19,528 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a7bef91497aa,43893,1731299779338 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T04:36:19,530 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:36:19,530 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:36:19,530 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:36:19,530 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:36:19,530 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a7bef91497aa:0, corePoolSize=10, maxPoolSize=10 2024-11-11T04:36:19,530 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:36:19,530 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a7bef91497aa:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:36:19,530 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a7bef91497aa:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T04:36:19,532 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:36:19,532 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T04:36:19,532 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:19,533 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T04:36:19,534 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731299809534 2024-11-11T04:36:19,534 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T04:36:19,535 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T04:36:19,535 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T04:36:19,535 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T04:36:19,535 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T04:36:19,535 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T04:36:19,535 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T04:36:19,535 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T04:36:19,535 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T04:36:19,535 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T04:36:19,535 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T04:36:19,535 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T04:36:19,536 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299779535,5,FailOnTimeoutGroup] 2024-11-11T04:36:19,536 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299779536,5,FailOnTimeoutGroup] 2024-11-11T04:36:19,536 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T04:36:19,536 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T04:36:19,536 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T04:36:19,536 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
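Among the cleaner and chore records above, HMaster notes that "Reopening regions with very high storeFileRefCount is disabled" until hbase.regions.recovery.store.file.ref.count is set above 0. The key name is quoted from that log line; the value 3 and the programmatic style below (rather than hbase-site.xml) are arbitrary choices for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: the ref-count key is quoted from the HMaster log line above;
// the threshold value 3 is an arbitrary example, not a recommendation.
public class RecoveryChoreConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Any value > 0 enables the "reopen regions with very high storeFileRefCount" recovery chore.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
    System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
  }
}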
2024-11-11T04:36:19,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741831_1007 (size=1321) 2024-11-11T04:36:19,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741831_1007 (size=1321) 2024-11-11T04:36:19,541 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T04:36:19,541 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf 2024-11-11T04:36:19,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:36:19,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:36:19,550 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:36:19,551 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:36:19,552 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:36:19,552 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:19,552 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:36:19,553 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T04:36:19,553 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T04:36:19,554 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:19,554 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:36:19,554 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:36:19,555 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:36:19,555 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:19,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:36:19,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T04:36:19,556 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:36:19,556 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:19,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:36:19,557 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T04:36:19,557 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740 2024-11-11T04:36:19,558 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740 2024-11-11T04:36:19,559 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T04:36:19,559 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T04:36:19,559 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
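The CompactionConfiguration records repeated above for every column family are just the defaults being echoed (minCompactSize:128 MB, minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, off-peak ratio 5.000000). For orientation, the sketch below shows how such values are normally supplied through Hadoop Configuration; the hbase.hstore.compaction.* key names are my assumption of the usual corresponding properties, and the values merely restate the logged defaults rather than recommending changes.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Assumed mapping from the logged CompactionConfiguration fields to the usual
// hbase.hstore.compaction.* keys; the values below simply mirror the defaults in the log.
public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                       // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);                      // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                // ratio 1.200000
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);        // off-peak ratio 5.000000
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
    System.out.println(conf.get("hbase.hstore.compaction.ratio"));
  }
}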
2024-11-11T04:36:19,560 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T04:36:19,562 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:36:19,562 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=841320, jitterRate=0.06979380548000336}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T04:36:19,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731299779550Initializing all the Stores at 1731299779550Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299779550Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299779551 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299779551Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299779551Cleaning up temporary data from old regions at 1731299779559 (+8 ms)Region opened successfully at 1731299779563 (+4 ms) 2024-11-11T04:36:19,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:36:19,563 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T04:36:19,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T04:36:19,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:36:19,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:36:19,564 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T04:36:19,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731299779563Disabling compacts and flushes for region at 1731299779563Disabling writes for close at 1731299779563Writing region close 
event to WAL at 1731299779564 (+1 ms)Closed at 1731299779564 2024-11-11T04:36:19,565 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:36:19,565 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T04:36:19,565 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T04:36:19,567 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T04:36:19,568 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T04:36:19,597 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer(746): ClusterId : 71312df2-ef72-4505-a1c9-d7ef5e55a882 2024-11-11T04:36:19,597 DEBUG [RS:0;a7bef91497aa:33973 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T04:36:19,599 DEBUG [RS:0;a7bef91497aa:33973 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T04:36:19,599 DEBUG [RS:0;a7bef91497aa:33973 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T04:36:19,601 DEBUG [RS:0;a7bef91497aa:33973 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T04:36:19,601 DEBUG [RS:0;a7bef91497aa:33973 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f3d942c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7bef91497aa/172.17.0.2:0 2024-11-11T04:36:19,613 DEBUG [RS:0;a7bef91497aa:33973 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a7bef91497aa:33973 2024-11-11T04:36:19,613 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T04:36:19,613 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T04:36:19,613 DEBUG [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-11T04:36:19,614 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer(2659): reportForDuty to master=a7bef91497aa,43893,1731299779338 with port=33973, startcode=1731299779381 2024-11-11T04:36:19,614 DEBUG [RS:0;a7bef91497aa:33973 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T04:36:19,616 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47169, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T04:36:19,617 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43893 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a7bef91497aa,33973,1731299779381 2024-11-11T04:36:19,617 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43893 {}] master.ServerManager(517): Registering regionserver=a7bef91497aa,33973,1731299779381 2024-11-11T04:36:19,618 DEBUG [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf 2024-11-11T04:36:19,618 DEBUG [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36477 2024-11-11T04:36:19,618 DEBUG [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T04:36:19,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:36:19,620 DEBUG [RS:0;a7bef91497aa:33973 {}] zookeeper.ZKUtil(111): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a7bef91497aa,33973,1731299779381 2024-11-11T04:36:19,621 WARN [RS:0;a7bef91497aa:33973 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T04:36:19,621 INFO [RS:0;a7bef91497aa:33973 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:36:19,621 DEBUG [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/WALs/a7bef91497aa,33973,1731299779381 2024-11-11T04:36:19,621 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a7bef91497aa,33973,1731299779381] 2024-11-11T04:36:19,624 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T04:36:19,625 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T04:36:19,626 INFO [RS:0;a7bef91497aa:33973 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T04:36:19,626 INFO [RS:0;a7bef91497aa:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
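The ServerManager and RegionServerTracker records above show the region server announcing itself by creating an ephemeral znode under /hbase/rs, which the master then tracks; the node disappears automatically if the region server's ZooKeeper session ends. A bare-bones sketch of that ephemeral-node pattern with the plain ZooKeeper client follows; the class name, the placeholder server name example-host,33973,0, and the empty payload are assumptions, since HBase writes its own payload and wraps these calls in RecoverableZooKeeper (also visible elsewhere in this log).

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Sketch of the ephemeral-node pattern behind "RegionServer ephemeral node created":
// the registration znode is removed by ZooKeeper itself when the owning session ends.
public class EphemeralRegistrationSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55813", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    // Hypothetical server name; HBase uses host,port,startcode as seen in the log.
    String path = "/hbase/rs/example-host,33973,0";
    zk.create(path, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    System.out.println("registered " + path);
    Thread.sleep(30_000); // the node stays while the session is alive; closing removes it
    zk.close();
  }
}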
2024-11-11T04:36:19,626 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T04:36:19,627 INFO [RS:0;a7bef91497aa:33973 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T04:36:19,627 INFO [RS:0;a7bef91497aa:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T04:36:19,627 DEBUG [RS:0;a7bef91497aa:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:36:19,627 DEBUG [RS:0;a7bef91497aa:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:36:19,627 DEBUG [RS:0;a7bef91497aa:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:36:19,627 DEBUG [RS:0;a7bef91497aa:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:36:19,627 DEBUG [RS:0;a7bef91497aa:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:36:19,627 DEBUG [RS:0;a7bef91497aa:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a7bef91497aa:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:36:19,627 DEBUG [RS:0;a7bef91497aa:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:36:19,627 DEBUG [RS:0;a7bef91497aa:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:36:19,627 DEBUG [RS:0;a7bef91497aa:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:36:19,627 DEBUG [RS:0;a7bef91497aa:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:36:19,627 DEBUG [RS:0;a7bef91497aa:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:36:19,627 DEBUG [RS:0;a7bef91497aa:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:36:19,627 DEBUG [RS:0;a7bef91497aa:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a7bef91497aa:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:36:19,628 DEBUG [RS:0;a7bef91497aa:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:36:19,628 INFO [RS:0;a7bef91497aa:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
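[annotation] The executor.ExecutorService and ChoreService entries above just record fixed-size worker pools and fixed-period background tasks being wired up on the region server. As a loose analogy only (HBase's ChoreService is its own implementation), a chore such as CompactionChecker with period=1000, unit=MILLISECONDS behaves much like a fixed-delay task on a plain JDK scheduled executor:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Loose analogy, not HBase code: a "name=..., period=1000, unit=MILLISECONDS is
// enabled" chore is roughly a fixed-delay task on a small scheduled pool.
public final class ChoreAnalogy {
    public static void main(String[] args) {
        ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);
        chorePool.scheduleWithFixedDelay(
                () -> System.out.println("CompactionChecker tick"),
                1000, 1000, TimeUnit.MILLISECONDS);
    }
}
```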
2024-11-11T04:36:19,628 INFO [RS:0;a7bef91497aa:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:36:19,628 INFO [RS:0;a7bef91497aa:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:36:19,628 INFO [RS:0;a7bef91497aa:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T04:36:19,628 INFO [RS:0;a7bef91497aa:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T04:36:19,628 INFO [RS:0;a7bef91497aa:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,33973,1731299779381-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:36:19,643 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T04:36:19,643 INFO [RS:0;a7bef91497aa:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,33973,1731299779381-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:36:19,643 INFO [RS:0;a7bef91497aa:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:36:19,643 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.Replication(171): a7bef91497aa,33973,1731299779381 started 2024-11-11T04:36:19,656 INFO [RS:0;a7bef91497aa:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:36:19,656 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer(1482): Serving as a7bef91497aa,33973,1731299779381, RpcServer on a7bef91497aa/172.17.0.2:33973, sessionid=0x101959d98640001 2024-11-11T04:36:19,657 DEBUG [RS:0;a7bef91497aa:33973 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T04:36:19,657 DEBUG [RS:0;a7bef91497aa:33973 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a7bef91497aa,33973,1731299779381 2024-11-11T04:36:19,657 DEBUG [RS:0;a7bef91497aa:33973 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7bef91497aa,33973,1731299779381' 2024-11-11T04:36:19,657 DEBUG [RS:0;a7bef91497aa:33973 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T04:36:19,657 DEBUG [RS:0;a7bef91497aa:33973 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T04:36:19,658 DEBUG [RS:0;a7bef91497aa:33973 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T04:36:19,658 DEBUG [RS:0;a7bef91497aa:33973 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T04:36:19,658 DEBUG [RS:0;a7bef91497aa:33973 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a7bef91497aa,33973,1731299779381 2024-11-11T04:36:19,658 DEBUG [RS:0;a7bef91497aa:33973 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7bef91497aa,33973,1731299779381' 2024-11-11T04:36:19,658 DEBUG [RS:0;a7bef91497aa:33973 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T04:36:19,658 DEBUG 
[RS:0;a7bef91497aa:33973 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T04:36:19,658 DEBUG [RS:0;a7bef91497aa:33973 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T04:36:19,658 INFO [RS:0;a7bef91497aa:33973 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T04:36:19,659 INFO [RS:0;a7bef91497aa:33973 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T04:36:19,718 WARN [a7bef91497aa:43893 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-11T04:36:19,760 INFO [RS:0;a7bef91497aa:33973 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C33973%2C1731299779381, suffix=, logDir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/WALs/a7bef91497aa,33973,1731299779381, archiveDir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/oldWALs, maxLogs=32 2024-11-11T04:36:19,761 INFO [RS:0;a7bef91497aa:33973 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C33973%2C1731299779381.1731299779761 2024-11-11T04:36:19,766 INFO [RS:0;a7bef91497aa:33973 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/WALs/a7bef91497aa,33973,1731299779381/a7bef91497aa%2C33973%2C1731299779381.1731299779761 2024-11-11T04:36:19,767 DEBUG [RS:0;a7bef91497aa:33973 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36417:36417),(127.0.0.1/127.0.0.1:45445:45445)] 2024-11-11T04:36:19,968 DEBUG [a7bef91497aa:43893 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T04:36:19,969 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a7bef91497aa,33973,1731299779381 2024-11-11T04:36:19,970 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7bef91497aa,33973,1731299779381, state=OPENING 2024-11-11T04:36:19,971 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T04:36:19,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:36:19,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:36:19,974 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:36:19,974 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T04:36:19,974 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:36:19,974 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a7bef91497aa,33973,1731299779381}] 2024-11-11T04:36:20,127 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T04:36:20,129 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59855, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T04:36:20,132 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T04:36:20,132 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:36:20,134 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C33973%2C1731299779381.meta, suffix=.meta, logDir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/WALs/a7bef91497aa,33973,1731299779381, archiveDir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/oldWALs, maxLogs=32 2024-11-11T04:36:20,134 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C33973%2C1731299779381.meta.1731299780134.meta 2024-11-11T04:36:20,139 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/WALs/a7bef91497aa,33973,1731299779381/a7bef91497aa%2C33973%2C1731299779381.meta.1731299780134.meta 2024-11-11T04:36:20,144 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45445:45445),(127.0.0.1/127.0.0.1:36417:36417)] 2024-11-11T04:36:20,146 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:36:20,146 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T04:36:20,146 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T04:36:20,146 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
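[annotation] Both WALs created above report blocksize=256 MB, rollsize=128 MB. Assuming default behavior, the roll size is the WAL block size scaled by hbase.regionserver.logroll.multiplier (default 0.5), so the writer is rolled before a WAL file spills past a single block; this relationship is what the TestLogRolling test exercises. A one-line sketch of that calculation:

```java
// Relationship behind "blocksize=256 MB, rollsize=128 MB" in the WAL lines above
// (assumes the default hbase.regionserver.logroll.multiplier of 0.5).
public final class WalRollSize {
    public static void main(String[] args) {
        long blockSize    = 256L << 20;  // WAL blocksize reported in the log
        double multiplier = 0.5;         // hbase.regionserver.logroll.multiplier (default)
        long rollSize     = (long) (blockSize * multiplier);
        System.out.printf("roll WAL once it reaches ~%d MB%n", rollSize >> 20);  // 128 MB
    }
}
```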
2024-11-11T04:36:20,146 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T04:36:20,146 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:36:20,146 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T04:36:20,146 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T04:36:20,147 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:36:20,148 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:36:20,148 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:20,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:36:20,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T04:36:20,149 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T04:36:20,149 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:20,150 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:36:20,150 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:36:20,150 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:36:20,150 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:20,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:36:20,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T04:36:20,151 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:36:20,151 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:20,152 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
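[annotation] The CompactionConfiguration entries repeated for each meta column family all carry the same defaults: minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0. In the size-based selection those ratios are used roughly as "a store file stays in the candidate set only while its size is at most ratio times the combined size of the files considered after it." A simplified, hypothetical sketch of that rule (the real ExploringCompactionPolicy does considerably more work):

```java
import java.util.ArrayList;
import java.util.List;

// Simplified sketch of the ratio rule referenced by the CompactionConfiguration
// lines above; not the actual ExploringCompactionPolicy.
public final class RatioRuleSketch {
    static List<Long> select(List<Long> sizes, double ratio, int minFiles, int maxFiles) {
        List<Long> candidates = new ArrayList<>(sizes);
        int start = 0;
        // Skip files that are too large relative to everything that follows them.
        while (start < candidates.size()) {
            long rest = 0;
            for (int i = start + 1; i < candidates.size(); i++) rest += candidates.get(i);
            if (candidates.get(start) <= ratio * rest) break;
            start++;
        }
        List<Long> picked = candidates.subList(start, Math.min(candidates.size(), start + maxFiles));
        return picked.size() >= minFiles ? picked : List.of();
    }

    public static void main(String[] args) {
        // Sizes in MB, oldest first; with ratio 1.2 the 500 MB file is left out.
        System.out.println(select(List.of(500L, 40L, 30L, 20L, 10L), 1.2, 3, 10));
    }
}
```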
2024-11-11T04:36:20,152 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T04:36:20,152 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740 2024-11-11T04:36:20,153 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740 2024-11-11T04:36:20,154 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T04:36:20,154 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T04:36:20,155 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T04:36:20,156 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T04:36:20,156 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=806797, jitterRate=0.025896579027175903}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T04:36:20,156 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T04:36:20,157 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731299780146Writing region info on filesystem at 1731299780146Initializing all the Stores at 1731299780147 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299780147Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299780147Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299780147Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299780147Cleaning up temporary data from old regions at 1731299780154 (+7 ms)Running coprocessor post-open hooks at 1731299780156 (+2 ms)Region opened successfully at 1731299780157 (+1 ms) 2024-11-11T04:36:20,158 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731299780127 2024-11-11T04:36:20,160 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T04:36:20,160 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T04:36:20,161 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a7bef91497aa,33973,1731299779381 2024-11-11T04:36:20,161 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7bef91497aa,33973,1731299779381, state=OPEN 2024-11-11T04:36:20,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:36:20,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:36:20,167 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a7bef91497aa,33973,1731299779381 2024-11-11T04:36:20,167 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:36:20,167 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:36:20,169 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T04:36:20,169 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a7bef91497aa,33973,1731299779381 in 193 msec 2024-11-11T04:36:20,171 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T04:36:20,171 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 604 msec 2024-11-11T04:36:20,172 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:36:20,172 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T04:36:20,173 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T04:36:20,174 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7bef91497aa,33973,1731299779381, seqNum=-1] 2024-11-11T04:36:20,174 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:36:20,175 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48241, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:36:20,180 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 651 msec 2024-11-11T04:36:20,180 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731299780180, completionTime=-1 2024-11-11T04:36:20,180 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T04:36:20,180 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T04:36:20,182 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T04:36:20,182 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731299840182 2024-11-11T04:36:20,182 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731299900182 2024-11-11T04:36:20,182 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-11T04:36:20,182 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,43893,1731299779338-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:36:20,182 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,43893,1731299779338-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:36:20,182 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,43893,1731299779338-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:36:20,182 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a7bef91497aa:43893, period=300000, unit=MILLISECONDS is enabled. 
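[annotation] The "Start fetching meta region location from registry" / "The fetched meta region location is [region=hbase:meta,,1.1588230740, ...]" exchange above is what any client performs before its first read. A minimal client-side equivalent using the standard connection API; the ZooKeeper quorum and port below are placeholders matching the test's values, not something to copy verbatim:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

// Minimal sketch of looking up the hbase:meta location from a client, mirroring
// the registry fetch logged above. Quorum/port are placeholders.
public final class MetaLocationLookup {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 55813);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
            locator.getAllRegionLocations().forEach(loc ->
                    System.out.println(loc.getRegion().getRegionNameAsString()
                            + " on " + loc.getServerName()));
        }
    }
}
```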
2024-11-11T04:36:20,182 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T04:36:20,182 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T04:36:20,184 DEBUG [master/a7bef91497aa:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T04:36:20,186 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.773sec 2024-11-11T04:36:20,186 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T04:36:20,186 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T04:36:20,186 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T04:36:20,186 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T04:36:20,186 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T04:36:20,186 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,43893,1731299779338-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:36:20,186 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,43893,1731299779338-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T04:36:20,188 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T04:36:20,188 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T04:36:20,188 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,43893,1731299779338-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T04:36:20,197 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@444decb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:36:20,197 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a7bef91497aa,43893,-1 for getting cluster id 2024-11-11T04:36:20,198 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T04:36:20,199 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '71312df2-ef72-4505-a1c9-d7ef5e55a882' 2024-11-11T04:36:20,199 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T04:36:20,199 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "71312df2-ef72-4505-a1c9-d7ef5e55a882" 2024-11-11T04:36:20,199 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bcfbd03, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:36:20,199 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a7bef91497aa,43893,-1] 2024-11-11T04:36:20,200 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T04:36:20,200 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:36:20,201 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39332, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T04:36:20,201 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77ee7ab7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:36:20,202 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T04:36:20,203 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7bef91497aa,33973,1731299779381, seqNum=-1] 2024-11-11T04:36:20,203 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:36:20,204 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57412, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:36:20,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a7bef91497aa,43893,1731299779338 2024-11-11T04:36:20,205 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:36:20,207 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T04:36:20,208 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T04:36:20,208 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is a7bef91497aa,43893,1731299779338 2024-11-11T04:36:20,209 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2195603a 2024-11-11T04:36:20,209 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T04:36:20,210 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39348, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T04:36:20,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43893 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T04:36:20,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43893 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-11T04:36:20,211 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43893 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T04:36:20,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43893 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-11T04:36:20,213 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T04:36:20,213 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:20,214 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43893 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-11T04:36:20,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T04:36:20,215 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T04:36:20,221 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741835_1011 (size=381) 2024-11-11T04:36:20,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741835_1011 (size=381) 2024-11-11T04:36:20,223 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 56b1ef1e5f926a0ee91a838978a47cf4, NAME => 'TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf 2024-11-11T04:36:20,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741836_1012 (size=64) 2024-11-11T04:36:20,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741836_1012 (size=64) 2024-11-11T04:36:20,230 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:36:20,230 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 56b1ef1e5f926a0ee91a838978a47cf4, disabling compactions & flushes 2024-11-11T04:36:20,230 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. 2024-11-11T04:36:20,230 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. 2024-11-11T04:36:20,230 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. after waiting 0 ms 2024-11-11T04:36:20,230 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. 2024-11-11T04:36:20,230 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. 
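[annotation] The create request logged at 04:36:20,211, together with the two TableDescriptorChecker warnings just before it, corresponds to a descriptor built with a deliberately tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) so the test can force frequent flushes, WAL rolls, and splits. A hedged client-side sketch of an equivalent request using the standard builder API; `admin` is assumed to come from Connection.getAdmin() on an already-open connection:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

// Sketch of a create-table call equivalent to the one logged above.
public final class CreateTestTable {
    static void createTestLogRollingTable(Admin admin) throws java.io.IOException {
        TableDescriptor desc = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setMaxFileSize(786432L)      // triggers the MAX_FILESIZE warning: regions split very early
                .setMemStoreFlushSize(8192L)  // triggers the MEMSTORE_FLUSHSIZE warning: memstores flush constantly
                .build();
        admin.createTable(desc);
    }
}
```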
2024-11-11T04:36:20,230 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 56b1ef1e5f926a0ee91a838978a47cf4: Waiting for close lock at 1731299780230Disabling compacts and flushes for region at 1731299780230Disabling writes for close at 1731299780230Writing region close event to WAL at 1731299780230Closed at 1731299780230 2024-11-11T04:36:20,232 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T04:36:20,232 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731299780232"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731299780232"}]},"ts":"1731299780232"} 2024-11-11T04:36:20,235 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-11T04:36:20,236 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T04:36:20,236 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731299780236"}]},"ts":"1731299780236"} 2024-11-11T04:36:20,238 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-11T04:36:20,238 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56b1ef1e5f926a0ee91a838978a47cf4, ASSIGN}] 2024-11-11T04:36:20,240 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56b1ef1e5f926a0ee91a838978a47cf4, ASSIGN 2024-11-11T04:36:20,240 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56b1ef1e5f926a0ee91a838978a47cf4, ASSIGN; state=OFFLINE, location=a7bef91497aa,33973,1731299779381; forceNewPlan=false, retain=false 2024-11-11T04:36:20,391 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=56b1ef1e5f926a0ee91a838978a47cf4, regionState=OPENING, regionLocation=a7bef91497aa,33973,1731299779381 2024-11-11T04:36:20,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56b1ef1e5f926a0ee91a838978a47cf4, ASSIGN because future has completed 2024-11-11T04:36:20,394 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 56b1ef1e5f926a0ee91a838978a47cf4, 
server=a7bef91497aa,33973,1731299779381}] 2024-11-11T04:36:20,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:20,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:20,550 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. 2024-11-11T04:36:20,550 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 56b1ef1e5f926a0ee91a838978a47cf4, NAME => 'TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:36:20,550 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:20,551 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:36:20,551 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:20,551 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:20,552 INFO [StoreOpener-56b1ef1e5f926a0ee91a838978a47cf4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:20,553 INFO [StoreOpener-56b1ef1e5f926a0ee91a838978a47cf4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for 
tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 56b1ef1e5f926a0ee91a838978a47cf4 columnFamilyName info 2024-11-11T04:36:20,553 DEBUG [StoreOpener-56b1ef1e5f926a0ee91a838978a47cf4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:20,554 INFO [StoreOpener-56b1ef1e5f926a0ee91a838978a47cf4-1 {}] regionserver.HStore(327): Store=56b1ef1e5f926a0ee91a838978a47cf4/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:36:20,554 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:20,554 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:20,555 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:20,555 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:20,555 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:20,556 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:20,558 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:36:20,558 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 56b1ef1e5f926a0ee91a838978a47cf4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=720986, jitterRate=-0.08321940898895264}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T04:36:20,558 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:20,559 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 56b1ef1e5f926a0ee91a838978a47cf4: Running coprocessor pre-open hook at 
1731299780551Writing region info on filesystem at 1731299780551Initializing all the Stores at 1731299780551Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299780551Cleaning up temporary data from old regions at 1731299780555 (+4 ms)Running coprocessor post-open hooks at 1731299780558 (+3 ms)Region opened successfully at 1731299780559 (+1 ms) 2024-11-11T04:36:20,560 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4., pid=6, masterSystemTime=1731299780546 2024-11-11T04:36:20,562 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. 2024-11-11T04:36:20,562 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. 2024-11-11T04:36:20,563 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=56b1ef1e5f926a0ee91a838978a47cf4, regionState=OPEN, openSeqNum=2, regionLocation=a7bef91497aa,33973,1731299779381 2024-11-11T04:36:20,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 56b1ef1e5f926a0ee91a838978a47cf4, server=a7bef91497aa,33973,1731299779381 because future has completed 2024-11-11T04:36:20,568 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T04:36:20,568 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 56b1ef1e5f926a0ee91a838978a47cf4, server=a7bef91497aa,33973,1731299779381 in 172 msec 2024-11-11T04:36:20,570 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T04:36:20,570 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56b1ef1e5f926a0ee91a838978a47cf4, ASSIGN in 330 msec 2024-11-11T04:36:20,571 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T04:36:20,571 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731299780571"}]},"ts":"1731299780571"} 2024-11-11T04:36:20,573 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-11T04:36:20,574 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; 
CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T04:36:20,575 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 363 msec 2024-11-11T04:36:21,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:21,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:22,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:22,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:23,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,170 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,170 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,185 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,185 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:23,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:23,693 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T04:36:23,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,695 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,695 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,695 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,695 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:23,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:24,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:24,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:25,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:25,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:25,624 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T04:36:25,624 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-11T04:36:26,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:26,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:27,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:27,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:27,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-11T04:36:27,457 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-11T04:36:27,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T04:36:28,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:28,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:29,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:29,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:30,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T04:36:30,245 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-11T04:36:30,245 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-11T04:36:30,247 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-11T04:36:30,247 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. 2024-11-11T04:36:30,250 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4., hostname=a7bef91497aa,33973,1731299779381, seqNum=2] 2024-11-11T04:36:30,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:30,261 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 56b1ef1e5f926a0ee91a838978a47cf4 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T04:36:30,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/263fddda3b944421b33b887d1b44fc1a is 1080, key is row0001/info:/1731299790251/Put/seqid=0 2024-11-11T04:36:30,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741837_1013 (size=12509) 2024-11-11T04:36:30,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741837_1013 (size=12509) 2024-11-11T04:36:30,290 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/263fddda3b944421b33b887d1b44fc1a 2024-11-11T04:36:30,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=56b1ef1e5f926a0ee91a838978a47cf4, server=a7bef91497aa,33973,1731299779381 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-11-11T04:36:30,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/263fddda3b944421b33b887d1b44fc1a as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/263fddda3b944421b33b887d1b44fc1a
2024-11-11T04:36:30,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:57412 deadline: 1731299800296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=56b1ef1e5f926a0ee91a838978a47cf4, server=a7bef91497aa,33973,1731299779381
2024-11-11T04:36:30,303 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/263fddda3b944421b33b887d1b44fc1a, entries=7, sequenceid=11, filesize=12.2 K
2024-11-11T04:36:30,304 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 56b1ef1e5f926a0ee91a838978a47cf4 in 43ms, sequenceid=11, compaction requested=false
2024-11-11T04:36:30,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 56b1ef1e5f926a0ee91a838978a47cf4:
2024-11-11T04:36:30,320 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4., hostname=a7bef91497aa,33973,1731299779381, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4., hostname=a7bef91497aa,33973,1731299779381, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=56b1ef1e5f926a0ee91a838978a47cf4, server=a7bef91497aa,33973,1731299779381
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-11T04:36:30,320 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4., hostname=a7bef91497aa,33973,1731299779381, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=56b1ef1e5f926a0ee91a838978a47cf4, server=a7bef91497aa,33973,1731299779381 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-11T04:36:30,321 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4., hostname=a7bef91497aa,33973,1731299779381, seqNum=2 because the exception is null or not the one we care about 2024-11-11T04:36:30,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
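The entries above trace the RegionTooBusyException end to end: a put on region 56b1ef1e5f926a0ee91a838978a47cf4 arrives while the memstore is over its 32.0 K blocking limit, the server rejects the call, a flush drains ~7.36 KB, and the async client keeps its cached region location because a busy region is not a stale-location error. On the application side this kind of failure is normally absorbed by retrying with backoff. The sketch below only illustrates that idea; it is not code from this test run. The column qualifier and value are placeholders, the stock HBase client already retries such calls internally, and depending on the client version the exception may surface wrapped rather than directly as RegionTooBusyException.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  // Retry a single put with exponential backoff when the region reports
  // "Over memstore limit" (RegionTooBusyException). Purely illustrative:
  // the real client has its own retry loop, and the exception may be wrapped.
  static void putWithBackoff(Table table, Put put, int maxAttempts)
      throws IOException, InterruptedException {
    long backoffMs = 100;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e;                              // region stayed busy; give up
        }
        Thread.sleep(backoffMs);                // let the flush drain the memstore
        backoffMs = Math.min(backoffMs * 2, 10_000);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      // Row and family come from the log above; qualifier/value are made up.
      Put put = new Put(Bytes.toBytes("row0001"))
          .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      putWithBackoff(table, put, 5);
    }
  }
}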
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:30,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:31,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:31,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:32,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:32,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:32,959 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T04:36:32,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:32,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:32,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:32,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:32,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:32,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:32,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:32,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:32,982 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:32,982 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:32,982 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:32,983 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:32,983 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:32,983 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:32,989 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:32,989 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:32,989 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:32,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:33,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:33,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:34,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:34,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:35,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:35,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:36,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:36,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:37,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:37,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:38,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:38,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:39,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:39,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-11T04:36:40,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on 56b1ef1e5f926a0ee91a838978a47cf4
2024-11-11T04:36:40,326 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 56b1ef1e5f926a0ee91a838978a47cf4 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-11-11T04:36:40,332 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/8409f53c215d443c95299f3494de8b0d is 1080, key is row0008/info:/1731299790262/Put/seqid=0
2024-11-11T04:36:40,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741838_1014 (size=29761)
2024-11-11T04:36:40,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741838_1014 (size=29761)
2024-11-11T04:36:40,337 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/8409f53c215d443c95299f3494de8b0d
2024-11-11T04:36:40,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/8409f53c215d443c95299f3494de8b0d as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/8409f53c215d443c95299f3494de8b0d
2024-11-11T04:36:40,348 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/8409f53c215d443c95299f3494de8b0d, entries=23, sequenceid=37, filesize=29.1 K
2024-11-11T04:36:40,349 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 56b1ef1e5f926a0ee91a838978a47cf4 in 23ms, sequenceid=37, compaction requested=false
2024-11-11T04:36:40,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 56b1ef1e5f926a0ee91a838978a47cf4:
2024-11-11T04:36:40,350 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K
2024-11-11T04:36:40,350 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-11T04:36:40,350 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/8409f53c215d443c95299f3494de8b0d because midkey is the same as first or last row
2024-11-11T04:36:40,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for
hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:40,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:41,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:41,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:42,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:42,338 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 56b1ef1e5f926a0ee91a838978a47cf4 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T04:36:42,343 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/62e169c000a344cba3611f94bd4551cf is 1080, key is row0031/info:/1731299800328/Put/seqid=0 2024-11-11T04:36:42,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741839_1015 (size=12509) 2024-11-11T04:36:42,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741839_1015 (size=12509) 2024-11-11T04:36:42,350 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/62e169c000a344cba3611f94bd4551cf 2024-11-11T04:36:42,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/62e169c000a344cba3611f94bd4551cf as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/62e169c000a344cba3611f94bd4551cf 2024-11-11T04:36:42,361 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/62e169c000a344cba3611f94bd4551cf, entries=7, sequenceid=47, filesize=12.2 K 2024-11-11T04:36:42,362 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 56b1ef1e5f926a0ee91a838978a47cf4 in 24ms, sequenceid=47, compaction requested=true 2024-11-11T04:36:42,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 56b1ef1e5f926a0ee91a838978a47cf4: 2024-11-11T04:36:42,362 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-11T04:36:42,362 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:36:42,362 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/8409f53c215d443c95299f3494de8b0d because midkey is the same as first or last row 2024-11-11T04:36:42,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:42,363 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 56b1ef1e5f926a0ee91a838978a47cf4:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T04:36:42,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:36:42,363 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T04:36:42,363 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 56b1ef1e5f926a0ee91a838978a47cf4 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-11T04:36:42,365 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T04:36:42,365 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1541): 56b1ef1e5f926a0ee91a838978a47cf4/info is initiating minor compaction (all files) 2024-11-11T04:36:42,365 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 56b1ef1e5f926a0ee91a838978a47cf4/info in TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. 2024-11-11T04:36:42,365 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/263fddda3b944421b33b887d1b44fc1a, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/8409f53c215d443c95299f3494de8b0d, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/62e169c000a344cba3611f94bd4551cf] into tmpdir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp, totalSize=53.5 K 2024-11-11T04:36:42,366 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting 263fddda3b944421b33b887d1b44fc1a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731299790251 2024-11-11T04:36:42,366 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8409f53c215d443c95299f3494de8b0d, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731299790262 2024-11-11T04:36:42,367 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting 62e169c000a344cba3611f94bd4551cf, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731299800328 2024-11-11T04:36:42,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/4615908936af46b586fcf25be4460f69 is 1080, key is row0038/info:/1731299802339/Put/seqid=0 
2024-11-11T04:36:42,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741840_1016 (size=21141) 2024-11-11T04:36:42,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741840_1016 (size=21141) 2024-11-11T04:36:42,374 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/4615908936af46b586fcf25be4460f69 2024-11-11T04:36:42,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/4615908936af46b586fcf25be4460f69 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/4615908936af46b586fcf25be4460f69 2024-11-11T04:36:42,383 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 56b1ef1e5f926a0ee91a838978a47cf4#info#compaction#60 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T04:36:42,384 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/c5c0cac9fba0481886699d414917e1c9 is 1080, key is row0001/info:/1731299790251/Put/seqid=0 2024-11-11T04:36:42,386 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/4615908936af46b586fcf25be4460f69, entries=15, sequenceid=65, filesize=20.6 K 2024-11-11T04:36:42,387 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for 56b1ef1e5f926a0ee91a838978a47cf4 in 24ms, sequenceid=65, compaction requested=false 2024-11-11T04:36:42,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 56b1ef1e5f926a0ee91a838978a47cf4: 2024-11-11T04:36:42,388 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.1 K, sizeToCheck=16.0 K 2024-11-11T04:36:42,388 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:36:42,388 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/8409f53c215d443c95299f3494de8b0d because midkey is the same as first or last row 2024-11-11T04:36:42,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741841_1017 (size=44978) 2024-11-11T04:36:42,389 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741841_1017 (size=44978) 2024-11-11T04:36:42,394 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/c5c0cac9fba0481886699d414917e1c9 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/c5c0cac9fba0481886699d414917e1c9 2024-11-11T04:36:42,399 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 56b1ef1e5f926a0ee91a838978a47cf4/info of 56b1ef1e5f926a0ee91a838978a47cf4 into c5c0cac9fba0481886699d414917e1c9(size=43.9 K), total size for store is 64.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T04:36:42,399 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 56b1ef1e5f926a0ee91a838978a47cf4: 2024-11-11T04:36:42,399 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4., storeName=56b1ef1e5f926a0ee91a838978a47cf4/info, priority=13, startTime=1731299802363; duration=0sec 2024-11-11T04:36:42,399 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-11T04:36:42,399 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:36:42,399 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/c5c0cac9fba0481886699d414917e1c9 because midkey is the same as first or last row 2024-11-11T04:36:42,399 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-11T04:36:42,399 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:36:42,399 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/c5c0cac9fba0481886699d414917e1c9 because midkey is the same as first or last row 2024-11-11T04:36:42,399 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-11T04:36:42,399 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:36:42,399 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/c5c0cac9fba0481886699d414917e1c9 because midkey is the same as first or last row 2024-11-11T04:36:42,399 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:36:42,399 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 56b1ef1e5f926a0ee91a838978a47cf4:info 2024-11-11T04:36:42,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:42,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:43,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:43,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:44,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:44,387 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 56b1ef1e5f926a0ee91a838978a47cf4 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-11T04:36:44,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/d4507dce1f2b4d7ba8dcec5aa43dd595 is 1080, key is row0053/info:/1731299802364/Put/seqid=0 2024-11-11T04:36:44,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741842_1018 (size=18987) 2024-11-11T04:36:44,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741842_1018 (size=18987) 2024-11-11T04:36:44,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/d4507dce1f2b4d7ba8dcec5aa43dd595 2024-11-11T04:36:44,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/d4507dce1f2b4d7ba8dcec5aa43dd595 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/d4507dce1f2b4d7ba8dcec5aa43dd595 2024-11-11T04:36:44,410 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/d4507dce1f2b4d7ba8dcec5aa43dd595, entries=13, sequenceid=82, filesize=18.5 K 2024-11-11T04:36:44,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=15.76 KB/16140 for 56b1ef1e5f926a0ee91a838978a47cf4 in 24ms, sequenceid=82, compaction requested=true 2024-11-11T04:36:44,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 56b1ef1e5f926a0ee91a838978a47cf4: 2024-11-11T04:36:44,411 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-11T04:36:44,411 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:36:44,411 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/c5c0cac9fba0481886699d414917e1c9 because midkey is the same as first or last row 2024-11-11T04:36:44,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 56b1ef1e5f926a0ee91a838978a47cf4:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-11T04:36:44,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:36:44,411 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T04:36:44,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:44,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 56b1ef1e5f926a0ee91a838978a47cf4 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-11T04:36:44,412 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T04:36:44,412 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1541): 56b1ef1e5f926a0ee91a838978a47cf4/info is initiating minor compaction (all files) 2024-11-11T04:36:44,412 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 56b1ef1e5f926a0ee91a838978a47cf4/info in TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. 2024-11-11T04:36:44,412 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/c5c0cac9fba0481886699d414917e1c9, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/4615908936af46b586fcf25be4460f69, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/d4507dce1f2b4d7ba8dcec5aa43dd595] into tmpdir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp, totalSize=83.1 K 2024-11-11T04:36:44,413 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting c5c0cac9fba0481886699d414917e1c9, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731299790251 2024-11-11T04:36:44,413 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4615908936af46b586fcf25be4460f69, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=65, earliestPutTs=1731299802339 2024-11-11T04:36:44,414 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting d4507dce1f2b4d7ba8dcec5aa43dd595, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731299802364 2024-11-11T04:36:44,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/54d6c5a866764548a75de064e45c94bf is 1080, key is 
row0066/info:/1731299804388/Put/seqid=0 2024-11-11T04:36:44,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741843_1019 (size=22222) 2024-11-11T04:36:44,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741843_1019 (size=22222) 2024-11-11T04:36:44,421 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/54d6c5a866764548a75de064e45c94bf 2024-11-11T04:36:44,426 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 56b1ef1e5f926a0ee91a838978a47cf4#info#compaction#63 average throughput is 33.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T04:36:44,426 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/a43b6c0c26664e50bb4d5f761e1f2786 is 1080, key is row0001/info:/1731299790251/Put/seqid=0 2024-11-11T04:36:44,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/54d6c5a866764548a75de064e45c94bf as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/54d6c5a866764548a75de064e45c94bf 2024-11-11T04:36:44,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741844_1020 (size=75378) 2024-11-11T04:36:44,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741844_1020 (size=75378) 2024-11-11T04:36:44,433 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/54d6c5a866764548a75de064e45c94bf, entries=16, sequenceid=101, filesize=21.7 K 2024-11-11T04:36:44,436 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=14.71 KB/15064 for 56b1ef1e5f926a0ee91a838978a47cf4 in 25ms, sequenceid=101, compaction requested=false 2024-11-11T04:36:44,436 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 56b1ef1e5f926a0ee91a838978a47cf4: 2024-11-11T04:36:44,436 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=104.8 K, sizeToCheck=16.0 K 2024-11-11T04:36:44,436 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:36:44,436 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/c5c0cac9fba0481886699d414917e1c9 because midkey is the same as first or last row 2024-11-11T04:36:44,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:44,437 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 56b1ef1e5f926a0ee91a838978a47cf4 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-11T04:36:44,438 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/a43b6c0c26664e50bb4d5f761e1f2786 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/a43b6c0c26664e50bb4d5f761e1f2786 2024-11-11T04:36:44,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:44,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:44,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/32fba6b5280745258deefbccfed62f47 is 1080, key is row0082/info:/1731299804412/Put/seqid=0 2024-11-11T04:36:44,445 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 56b1ef1e5f926a0ee91a838978a47cf4/info of 56b1ef1e5f926a0ee91a838978a47cf4 into a43b6c0c26664e50bb4d5f761e1f2786(size=73.6 K), total size for store is 95.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T04:36:44,445 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 56b1ef1e5f926a0ee91a838978a47cf4: 2024-11-11T04:36:44,445 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4., storeName=56b1ef1e5f926a0ee91a838978a47cf4/info, priority=13, startTime=1731299804411; duration=0sec 2024-11-11T04:36:44,445 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=95.3 K, sizeToCheck=16.0 K 2024-11-11T04:36:44,445 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:36:44,445 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=95.3 K, sizeToCheck=16.0 K 2024-11-11T04:36:44,445 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:36:44,445 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=95.3 K, sizeToCheck=16.0 K 2024-11-11T04:36:44,445 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:36:44,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741845_1021 (size=21141) 2024-11-11T04:36:44,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741845_1021 (size=21141) 2024-11-11T04:36:44,446 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:36:44,446 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:36:44,446 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 56b1ef1e5f926a0ee91a838978a47cf4:info 2024-11-11T04:36:44,446 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/32fba6b5280745258deefbccfed62f47 2024-11-11T04:36:44,447 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43893 {}] assignment.AssignmentManager(1363): Split request from a7bef91497aa,33973,1731299779381, parent={ENCODED => 56b1ef1e5f926a0ee91a838978a47cf4, NAME => 'TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-11T04:36:44,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/.tmp/info/32fba6b5280745258deefbccfed62f47 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/32fba6b5280745258deefbccfed62f47 2024-11-11T04:36:44,453 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43893 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=a7bef91497aa,33973,1731299779381 2024-11-11T04:36:44,456 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/32fba6b5280745258deefbccfed62f47, entries=15, sequenceid=119, filesize=20.6 K 2024-11-11T04:36:44,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=0 B/0 for 56b1ef1e5f926a0ee91a838978a47cf4 in 19ms, sequenceid=119, compaction requested=true 2024-11-11T04:36:44,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 56b1ef1e5f926a0ee91a838978a47cf4: 2024-11-11T04:36:44,457 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=116.0 K, sizeToCheck=16.0 K 2024-11-11T04:36:44,457 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:36:44,457 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=116.0 K, sizeToCheck=16.0 K 2024-11-11T04:36:44,457 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:36:44,457 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=116.0 K, sizeToCheck=16.0 K 2024-11-11T04:36:44,457 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T04:36:44,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-11-11T04:36:44,456 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43893 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56b1ef1e5f926a0ee91a838978a47cf4, daughterA=6f92e86db399ee7ca557aad38d81d716, daughterB=f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:44,457 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56b1ef1e5f926a0ee91a838978a47cf4, daughterA=6f92e86db399ee7ca557aad38d81d716, daughterB=f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:44,458 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, 
parent=56b1ef1e5f926a0ee91a838978a47cf4, daughterA=6f92e86db399ee7ca557aad38d81d716, daughterB=f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:44,458 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56b1ef1e5f926a0ee91a838978a47cf4, daughterA=6f92e86db399ee7ca557aad38d81d716, daughterB=f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:44,458 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43893 {}] assignment.AssignmentManager(1363): Split request from a7bef91497aa,33973,1731299779381, parent={ENCODED => 56b1ef1e5f926a0ee91a838978a47cf4, NAME => 'TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-11T04:36:44,459 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43893 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=SPLITTING, location=a7bef91497aa,33973,1731299779381 2024-11-11T04:36:44,460 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43893 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56b1ef1e5f926a0ee91a838978a47cf4, daughterA=d71f76f2268b3e634ede9164e70e06a8, daughterB=0196f1d0ad26827cae1ad7dcad6e8626 2024-11-11T04:36:44,460 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(842): Waiting on xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56b1ef1e5f926a0ee91a838978a47cf4, daughterA=d71f76f2268b3e634ede9164e70e06a8, daughterB=0196f1d0ad26827cae1ad7dcad6e8626 held by pid=7 2024-11-11T04:36:44,464 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56b1ef1e5f926a0ee91a838978a47cf4, UNASSIGN}] 2024-11-11T04:36:44,467 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(162): LOCK_EVENT_WAIT SchemaLocking[serverLocks={},namespaceLocks={hbase=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},tableLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},regionLocks={},peerLocks={},metaLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},globalLocks={}] 2024-11-11T04:36:44,467 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56b1ef1e5f926a0ee91a838978a47cf4, UNASSIGN 2024-11-11T04:36:44,467 DEBUG [PEWorker-3 {}] procedure2.ProcedureExecutor(1511): LOCK_EVENT_WAIT pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56b1ef1e5f926a0ee91a838978a47cf4, daughterA=d71f76f2268b3e634ede9164e70e06a8, daughterB=0196f1d0ad26827cae1ad7dcad6e8626 2024-11-11T04:36:44,468 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=56b1ef1e5f926a0ee91a838978a47cf4, regionState=CLOSING, regionLocation=a7bef91497aa,33973,1731299779381 2024-11-11T04:36:44,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 
{}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56b1ef1e5f926a0ee91a838978a47cf4, UNASSIGN because future has completed 2024-11-11T04:36:44,471 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-11T04:36:44,471 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 56b1ef1e5f926a0ee91a838978a47cf4, server=a7bef91497aa,33973,1731299779381}] 2024-11-11T04:36:44,627 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(122): Close 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:44,627 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-11T04:36:44,628 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1722): Closing 56b1ef1e5f926a0ee91a838978a47cf4, disabling compactions & flushes 2024-11-11T04:36:44,628 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. 2024-11-11T04:36:44,628 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. 2024-11-11T04:36:44,628 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. after waiting 0 ms 2024-11-11T04:36:44,628 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. 
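[Annotation, illustrative only] The "Should split because region size is big enough sumSize=..., sizeToCheck=..." and "regionsWithCommonTable=1" lines earlier in this trace are the split-policy check that triggered the split request above: the store file sizes are summed and compared against a threshold that grows with the number of co-hosted regions of the same table, capped at the configured desired max file size. The stand-alone sketch below is a simplified reading of that check, not the HBase source; the constants (16384-byte initial size, 779995-byte desired max, one region) are taken from values printed elsewhere in this log, and the cube-of-region-count scaling is my understanding of IncreasingToUpperBoundRegionSplitPolicy.

    // Simplified sketch of the "Should split because region size is big enough"
    // decision. Names and structure are illustrative, not the HBase implementation.
    public final class SplitSizeCheckSketch {

        // Threshold: initial size scaled by the cube of the number of co-hosted
        // regions of the same table, capped at the desired max file size.
        static long sizeToCheck(long initialSize, long desiredMaxFileSize, int regionsWithCommonTable) {
            long scaled = initialSize * (long) regionsWithCommonTable
                                      * regionsWithCommonTable
                                      * regionsWithCommonTable;
            return Math.min(desiredMaxFileSize, scaled);
        }

        static boolean shouldSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
            return sumStoreSizeBytes > sizeToCheckBytes;
        }

        public static void main(String[] args) {
            long sumSize = 97_587;                          // roughly the 95.3 K logged above
            long check   = sizeToCheck(16_384, 779_995, 1); // 16.0 K with one region of the table
            System.out.println("sizeToCheck=" + check + " shouldSplit=" + shouldSplit(sumSize, check));
        }
    }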
2024-11-11T04:36:44,629 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/263fddda3b944421b33b887d1b44fc1a, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/8409f53c215d443c95299f3494de8b0d, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/c5c0cac9fba0481886699d414917e1c9, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/62e169c000a344cba3611f94bd4551cf, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/4615908936af46b586fcf25be4460f69, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/d4507dce1f2b4d7ba8dcec5aa43dd595] to archive 2024-11-11T04:36:44,630 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T04:36:44,631 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/263fddda3b944421b33b887d1b44fc1a to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/263fddda3b944421b33b887d1b44fc1a 2024-11-11T04:36:44,632 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/8409f53c215d443c95299f3494de8b0d to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/8409f53c215d443c95299f3494de8b0d 2024-11-11T04:36:44,633 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/c5c0cac9fba0481886699d414917e1c9 to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/c5c0cac9fba0481886699d414917e1c9 2024-11-11T04:36:44,634 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/62e169c000a344cba3611f94bd4551cf to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/62e169c000a344cba3611f94bd4551cf 2024-11-11T04:36:44,635 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/4615908936af46b586fcf25be4460f69 to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/4615908936af46b586fcf25be4460f69 2024-11-11T04:36:44,636 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/d4507dce1f2b4d7ba8dcec5aa43dd595 to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/d4507dce1f2b4d7ba8dcec5aa43dd595 2024-11-11T04:36:44,642 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/recovered.edits/123.seqid, newMaxSeqId=123, maxSeqId=1 2024-11-11T04:36:44,643 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. 
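[Annotation, illustrative only] The HFileArchiver entries above do not delete the compacted-away store files; each one is moved from the region's data directory to the mirrored path under archive/, as the source/destination paths in the log show. A minimal sketch of that kind of move using the stock Hadoop FileSystem API follows; the path is a placeholder, and the real HFileArchiver additionally handles pre-existing destinations, retries, and directory-creation policy.

    // Sketch only: mirror a store file from .../data/default/... to
    // .../archive/data/default/... with the plain Hadoop FileSystem API.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ArchiveStoreFileSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(new java.net.URI("hdfs://localhost:36477"), conf);

            // Placeholder store-file path; a real path looks like the ones in the log above.
            Path src = new Path("/user/jenkins/test-data/placeholder/data/default/SomeTable/region/info/hfile");
            Path dst = new Path(src.toString().replaceFirst("/data/default/", "/archive/data/default/"));

            fs.mkdirs(dst.getParent());        // make sure the archive directory exists
            boolean moved = fs.rename(src, dst); // move rather than copy+delete
            System.out.println("archived=" + moved);
        }
    }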
2024-11-11T04:36:44,643 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1676): Region close journal for 56b1ef1e5f926a0ee91a838978a47cf4: Waiting for close lock at 1731299804628Running coprocessor pre-close hooks at 1731299804628Disabling compacts and flushes for region at 1731299804628Disabling writes for close at 1731299804628Writing region close event to WAL at 1731299804639 (+11 ms)Running coprocessor post-close hooks at 1731299804643 (+4 ms)Closed at 1731299804643 2024-11-11T04:36:44,645 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(157): Closed 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:44,646 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=56b1ef1e5f926a0ee91a838978a47cf4, regionState=CLOSED 2024-11-11T04:36:44,648 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 56b1ef1e5f926a0ee91a838978a47cf4, server=a7bef91497aa,33973,1731299779381 because future has completed 2024-11-11T04:36:44,651 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-11T04:36:44,651 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; CloseRegionProcedure 56b1ef1e5f926a0ee91a838978a47cf4, server=a7bef91497aa,33973,1731299779381 in 178 msec 2024-11-11T04:36:44,653 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-11T04:36:44,653 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56b1ef1e5f926a0ee91a838978a47cf4, UNASSIGN in 187 msec 2024-11-11T04:36:44,660 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:44,663 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=56b1ef1e5f926a0ee91a838978a47cf4, threads=3 2024-11-11T04:36:44,664 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/a43b6c0c26664e50bb4d5f761e1f2786 for region: 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:44,664 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/32fba6b5280745258deefbccfed62f47 for region: 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:44,664 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/54d6c5a866764548a75de064e45c94bf for region: 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:44,673 DEBUG 
[StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/54d6c5a866764548a75de064e45c94bf, top=true 2024-11-11T04:36:44,673 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/32fba6b5280745258deefbccfed62f47, top=true 2024-11-11T04:36:44,681 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/TestLogRolling-testLogRolling=56b1ef1e5f926a0ee91a838978a47cf4-32fba6b5280745258deefbccfed62f47 for child: f5f10686e127082b1019007ad1efb944, parent: 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:44,681 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/TestLogRolling-testLogRolling=56b1ef1e5f926a0ee91a838978a47cf4-54d6c5a866764548a75de064e45c94bf for child: f5f10686e127082b1019007ad1efb944, parent: 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:44,681 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/32fba6b5280745258deefbccfed62f47 for region: 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:44,681 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/54d6c5a866764548a75de064e45c94bf for region: 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:44,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741846_1022 (size=27) 2024-11-11T04:36:44,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741846_1022 (size=27) 2024-11-11T04:36:44,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741847_1023 (size=27) 2024-11-11T04:36:44,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741847_1023 (size=27) 2024-11-11T04:36:44,690 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/a43b6c0c26664e50bb4d5f761e1f2786 for region: 56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:36:44,692 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 
56b1ef1e5f926a0ee91a838978a47cf4 Daughter A: [hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/6f92e86db399ee7ca557aad38d81d716/info/a43b6c0c26664e50bb4d5f761e1f2786.56b1ef1e5f926a0ee91a838978a47cf4] storefiles, Daughter B: [hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/TestLogRolling-testLogRolling=56b1ef1e5f926a0ee91a838978a47cf4-32fba6b5280745258deefbccfed62f47, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/TestLogRolling-testLogRolling=56b1ef1e5f926a0ee91a838978a47cf4-54d6c5a866764548a75de064e45c94bf, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/a43b6c0c26664e50bb4d5f761e1f2786.56b1ef1e5f926a0ee91a838978a47cf4] storefiles. 2024-11-11T04:36:44,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741848_1024 (size=71) 2024-11-11T04:36:44,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741848_1024 (size=71) 2024-11-11T04:36:44,700 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:44,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741849_1025 (size=71) 2024-11-11T04:36:44,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741849_1025 (size=71) 2024-11-11T04:36:44,711 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:44,719 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/6f92e86db399ee7ca557aad38d81d716/recovered.edits/123.seqid, newMaxSeqId=123, maxSeqId=-1 2024-11-11T04:36:44,721 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/recovered.edits/123.seqid, newMaxSeqId=123, maxSeqId=-1 2024-11-11T04:36:44,723 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731299804723"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731299804723"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731299804723"}]},"ts":"1731299804723"} 2024-11-11T04:36:44,723 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731299804723"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731299804723"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731299804723"}]},"ts":"1731299804723"} 2024-11-11T04:36:44,723 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731299804723"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731299804723"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731299804723"}]},"ts":"1731299804723"} 2024-11-11T04:36:44,740 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6f92e86db399ee7ca557aad38d81d716, ASSIGN}, {pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5f10686e127082b1019007ad1efb944, ASSIGN}] 2024-11-11T04:36:44,741 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6f92e86db399ee7ca557aad38d81d716, ASSIGN 2024-11-11T04:36:44,741 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5f10686e127082b1019007ad1efb944, ASSIGN 2024-11-11T04:36:44,741 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6f92e86db399ee7ca557aad38d81d716, ASSIGN; state=SPLITTING_NEW, location=a7bef91497aa,33973,1731299779381; forceNewPlan=false, retain=false 2024-11-11T04:36:44,742 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5f10686e127082b1019007ad1efb944, ASSIGN; state=SPLITTING_NEW, location=a7bef91497aa,33973,1731299779381; forceNewPlan=false, retain=false 2024-11-11T04:36:44,892 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=6f92e86db399ee7ca557aad38d81d716, regionState=OPENING, regionLocation=a7bef91497aa,33973,1731299779381 2024-11-11T04:36:44,892 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=f5f10686e127082b1019007ad1efb944, regionState=OPENING, regionLocation=a7bef91497aa,33973,1731299779381 2024-11-11T04:36:44,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5f10686e127082b1019007ad1efb944, ASSIGN because future has 
completed 2024-11-11T04:36:44,895 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure f5f10686e127082b1019007ad1efb944, server=a7bef91497aa,33973,1731299779381}] 2024-11-11T04:36:44,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6f92e86db399ee7ca557aad38d81d716, ASSIGN because future has completed 2024-11-11T04:36:44,896 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6f92e86db399ee7ca557aad38d81d716, server=a7bef91497aa,33973,1731299779381}] 2024-11-11T04:36:45,051 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944. 2024-11-11T04:36:45,051 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => f5f10686e127082b1019007ad1efb944, NAME => 'TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-11T04:36:45,051 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:45,051 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:36:45,051 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:45,051 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:45,053 INFO [StoreOpener-f5f10686e127082b1019007ad1efb944-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:45,053 INFO [StoreOpener-f5f10686e127082b1019007ad1efb944-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f5f10686e127082b1019007ad1efb944 columnFamilyName info 2024-11-11T04:36:45,054 DEBUG [StoreOpener-f5f10686e127082b1019007ad1efb944-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:45,062 DEBUG [StoreOpener-f5f10686e127082b1019007ad1efb944-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/TestLogRolling-testLogRolling=56b1ef1e5f926a0ee91a838978a47cf4-32fba6b5280745258deefbccfed62f47 2024-11-11T04:36:45,066 DEBUG [StoreOpener-f5f10686e127082b1019007ad1efb944-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/TestLogRolling-testLogRolling=56b1ef1e5f926a0ee91a838978a47cf4-54d6c5a866764548a75de064e45c94bf 2024-11-11T04:36:45,072 DEBUG [StoreOpener-f5f10686e127082b1019007ad1efb944-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/a43b6c0c26664e50bb4d5f761e1f2786.56b1ef1e5f926a0ee91a838978a47cf4->hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/a43b6c0c26664e50bb4d5f761e1f2786-top 2024-11-11T04:36:45,073 INFO [StoreOpener-f5f10686e127082b1019007ad1efb944-1 {}] regionserver.HStore(327): Store=f5f10686e127082b1019007ad1efb944/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:36:45,073 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:45,074 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:45,075 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:45,075 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:45,075 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:45,077 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:45,077 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 
{event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened f5f10686e127082b1019007ad1efb944; next sequenceid=124; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=779995, jitterRate=-0.008186310529708862}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T04:36:45,077 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:45,078 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for f5f10686e127082b1019007ad1efb944: Running coprocessor pre-open hook at 1731299805052Writing region info on filesystem at 1731299805052Initializing all the Stores at 1731299805052Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299805052Cleaning up temporary data from old regions at 1731299805075 (+23 ms)Running coprocessor post-open hooks at 1731299805077 (+2 ms)Region opened successfully at 1731299805078 (+1 ms) 2024-11-11T04:36:45,079 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944., pid=13, masterSystemTime=1731299805047 2024-11-11T04:36:45,079 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store f5f10686e127082b1019007ad1efb944:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T04:36:45,079 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:36:45,079 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T04:36:45,080 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944. 2024-11-11T04:36:45,080 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1541): f5f10686e127082b1019007ad1efb944/info is initiating minor compaction (all files) 2024-11-11T04:36:45,080 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f5f10686e127082b1019007ad1efb944/info in TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944. 
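[Annotation, illustrative only] The daughter-region store files created by the split above are not copies. Files wholly on one side of the split key get an HFileLink whose name encodes table, parent region and original file (the TestLogRolling-testLogRolling=56b1ef1e...-32fba6b5... names created by StoreFileSplitter), while the file that straddles the split key gets a reference of the form <hfile>.<parentEncodedName>, later read as its -top or -bottom half when the daughter store opens. The sketch below only reproduces those two naming patterns as they appear in this log; the real logic lives in the HBase HFileLink and Reference classes.

    // Naming-pattern sketch derived from the file names in this log; illustration only.
    public final class SplitFileNamesSketch {
        static String hfileLinkName(String table, String parentEncodedRegion, String hfile) {
            // e.g. TestLogRolling-testLogRolling=56b1ef1e...-32fba6b5...
            return table + "=" + parentEncodedRegion + "-" + hfile;
        }

        static String referenceName(String hfile, String parentEncodedRegion) {
            // e.g. a43b6c0c26664e50bb4d5f761e1f2786.56b1ef1e5f926a0ee91a838978a47cf4 (read as -top/-bottom)
            return hfile + "." + parentEncodedRegion;
        }

        public static void main(String[] args) {
            String parent = "56b1ef1e5f926a0ee91a838978a47cf4";
            System.out.println(hfileLinkName("TestLogRolling-testLogRolling", parent,
                    "32fba6b5280745258deefbccfed62f47"));
            System.out.println(referenceName("a43b6c0c26664e50bb4d5f761e1f2786", parent));
        }
    }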
2024-11-11T04:36:45,081 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/a43b6c0c26664e50bb4d5f761e1f2786.56b1ef1e5f926a0ee91a838978a47cf4->hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/a43b6c0c26664e50bb4d5f761e1f2786-top, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/TestLogRolling-testLogRolling=56b1ef1e5f926a0ee91a838978a47cf4-54d6c5a866764548a75de064e45c94bf, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/TestLogRolling-testLogRolling=56b1ef1e5f926a0ee91a838978a47cf4-32fba6b5280745258deefbccfed62f47] into tmpdir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp, totalSize=116.0 K 2024-11-11T04:36:45,081 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944. 2024-11-11T04:36:45,081 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944. 2024-11-11T04:36:45,081 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716. 
2024-11-11T04:36:45,081 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting a43b6c0c26664e50bb4d5f761e1f2786.56b1ef1e5f926a0ee91a838978a47cf4, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731299790251 2024-11-11T04:36:45,081 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7752): Opening region: {ENCODED => 6f92e86db399ee7ca557aad38d81d716, NAME => 'TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-11T04:36:45,082 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=56b1ef1e5f926a0ee91a838978a47cf4-54d6c5a866764548a75de064e45c94bf, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1731299804388 2024-11-11T04:36:45,082 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 6f92e86db399ee7ca557aad38d81d716 2024-11-11T04:36:45,082 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:36:45,082 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=f5f10686e127082b1019007ad1efb944, regionState=OPEN, openSeqNum=124, regionLocation=a7bef91497aa,33973,1731299779381 2024-11-11T04:36:45,082 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7794): checking encryption for 6f92e86db399ee7ca557aad38d81d716 2024-11-11T04:36:45,082 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7797): checking classloading for 6f92e86db399ee7ca557aad38d81d716 2024-11-11T04:36:45,082 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=56b1ef1e5f926a0ee91a838978a47cf4-32fba6b5280745258deefbccfed62f47, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1731299804412 2024-11-11T04:36:45,083 INFO [StoreOpener-6f92e86db399ee7ca557aad38d81d716-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6f92e86db399ee7ca557aad38d81d716 2024-11-11T04:36:45,084 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-11T04:36:45,084 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
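[Annotation, illustrative only] The FlushAllLargeStoresPolicy line above (and the resulting 4/4 column-family flush of hbase:meta that follows) amounts to: flush only the column families whose memstore exceeds a lower bound, and fall back to flushing every family when none does. The sketch below is a simplified stand-in for that selection; the family sizes are taken from the flush sizes logged later in this trace, and the 16 KB bound is an assumed placeholder, not the configured value.

    // Simplified selection behind "Since none of the CFs were above the size,
    // flushing all." Illustration only, not the HBase implementation.
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    public final class FlushPolicySketch {
        static List<String> familiesToFlush(Map<String, Long> memstoreSizeByFamily, long lowerBoundBytes) {
            List<String> large = memstoreSizeByFamily.entrySet().stream()
                    .filter(e -> e.getValue() > lowerBoundBytes)
                    .map(Map.Entry::getKey)
                    .collect(Collectors.toList());
            // No family is individually large enough: flush all of them.
            return large.isEmpty() ? List.copyOf(memstoreSizeByFamily.keySet()) : large;
        }

        public static void main(String[] args) {
            Map<String, Long> sizes = Map.of("info", 5_070L, "ns", 74L, "table", 122L, "rep_barrier", 0L);
            System.out.println(familiesToFlush(sizes, 16 * 1024L)); // -> all four families
        }
    }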
2024-11-11T04:36:45,084 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-11T04:36:45,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure f5f10686e127082b1019007ad1efb944, server=a7bef91497aa,33973,1731299779381 because future has completed 2024-11-11T04:36:45,084 INFO [StoreOpener-6f92e86db399ee7ca557aad38d81d716-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6f92e86db399ee7ca557aad38d81d716 columnFamilyName info 2024-11-11T04:36:45,084 DEBUG [StoreOpener-6f92e86db399ee7ca557aad38d81d716-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:36:45,089 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-11-11T04:36:45,089 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; OpenRegionProcedure f5f10686e127082b1019007ad1efb944, server=a7bef91497aa,33973,1731299779381 in 190 msec 2024-11-11T04:36:45,091 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5f10686e127082b1019007ad1efb944, ASSIGN in 349 msec 2024-11-11T04:36:45,094 DEBUG [StoreOpener-6f92e86db399ee7ca557aad38d81d716-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/6f92e86db399ee7ca557aad38d81d716/info/a43b6c0c26664e50bb4d5f761e1f2786.56b1ef1e5f926a0ee91a838978a47cf4->hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/a43b6c0c26664e50bb4d5f761e1f2786-bottom 2024-11-11T04:36:45,095 INFO [StoreOpener-6f92e86db399ee7ca557aad38d81d716-1 {}] regionserver.HStore(327): Store=6f92e86db399ee7ca557aad38d81d716/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:36:45,095 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1038): replaying wal for 6f92e86db399ee7ca557aad38d81d716 2024-11-11T04:36:45,095 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/6f92e86db399ee7ca557aad38d81d716 2024-11-11T04:36:45,096 
DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/6f92e86db399ee7ca557aad38d81d716 2024-11-11T04:36:45,097 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1048): stopping wal replay for 6f92e86db399ee7ca557aad38d81d716 2024-11-11T04:36:45,097 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1060): Cleaning up temporary data for 6f92e86db399ee7ca557aad38d81d716 2024-11-11T04:36:45,099 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1093): writing seq id for 6f92e86db399ee7ca557aad38d81d716 2024-11-11T04:36:45,100 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1114): Opened 6f92e86db399ee7ca557aad38d81d716; next sequenceid=124; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=880625, jitterRate=0.11977329850196838}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T04:36:45,100 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6f92e86db399ee7ca557aad38d81d716 2024-11-11T04:36:45,100 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1006): Region open journal for 6f92e86db399ee7ca557aad38d81d716: Running coprocessor pre-open hook at 1731299805082Writing region info on filesystem at 1731299805082Initializing all the Stores at 1731299805083 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299805083Cleaning up temporary data from old regions at 1731299805097 (+14 ms)Running coprocessor post-open hooks at 1731299805100 (+3 ms)Region opened successfully at 1731299805100 2024-11-11T04:36:45,101 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716., pid=14, masterSystemTime=1731299805047 2024-11-11T04:36:45,101 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(403): Add compact mark for store 6f92e86db399ee7ca557aad38d81d716:info, priority=-2147483648, current under compaction store size is 2 2024-11-11T04:36:45,101 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:36:45,101 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-11T04:36:45,102 INFO 
[RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716. 2024-11-11T04:36:45,102 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.HStore(1541): 6f92e86db399ee7ca557aad38d81d716/info is initiating minor compaction (all files) 2024-11-11T04:36:45,102 INFO [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6f92e86db399ee7ca557aad38d81d716/info in TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716. 2024-11-11T04:36:45,102 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/.tmp/info/05403edea31142db85a8ef41904f589e is 193, key is TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944./info:regioninfo/1731299805082/Put/seqid=0 2024-11-11T04:36:45,102 INFO [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/6f92e86db399ee7ca557aad38d81d716/info/a43b6c0c26664e50bb4d5f761e1f2786.56b1ef1e5f926a0ee91a838978a47cf4->hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/a43b6c0c26664e50bb4d5f761e1f2786-bottom] into tmpdir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/6f92e86db399ee7ca557aad38d81d716/.tmp, totalSize=73.6 K 2024-11-11T04:36:45,103 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] compactions.Compactor(225): Compacting a43b6c0c26664e50bb4d5f761e1f2786.56b1ef1e5f926a0ee91a838978a47cf4, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731299790251 2024-11-11T04:36:45,103 DEBUG [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716. 2024-11-11T04:36:45,103 INFO [RS_OPEN_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716. 
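[Annotation, illustrative only] The RegionStateStore activity in this trace, the Put entries with regioninfo/splitA/splitB and state/seqnumDuringOpen qualifiers and the "updating hbase:meta row=... regionState=OPEN" lines that follow, is ordinary writes into the info family of the hbase:meta table. The sketch below shows what such a write looks like through the public HBase client API; the row key and values are placeholders lifted from this log, and HBase itself performs these updates through RegionStateStore/MetaTableAccessor rather than a raw Table handle.

    // Sketch of a region-state update written to hbase:meta via the public client API.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MetaStatePutSketch {
        public static void main(String[] args) throws Exception {
            byte[] row = Bytes.toBytes(
                "TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.");
            Put put = new Put(row)
                .addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"), Bytes.toBytes("OPEN"))
                .addColumn(Bytes.toBytes("info"), Bytes.toBytes("seqnumDuringOpen"), Bytes.toBytes(124L));

            try (Connection conn = ConnectionFactory.createConnection();
                 Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
                meta.put(put); // one row per region, keyed by the region name
            }
        }
    }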
2024-11-11T04:36:45,104 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=6f92e86db399ee7ca557aad38d81d716, regionState=OPEN, openSeqNum=124, regionLocation=a7bef91497aa,33973,1731299779381 2024-11-11T04:36:45,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6f92e86db399ee7ca557aad38d81d716, server=a7bef91497aa,33973,1731299779381 because future has completed 2024-11-11T04:36:45,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741850_1026 (size=9882) 2024-11-11T04:36:45,108 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f5f10686e127082b1019007ad1efb944#info#compaction#66 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T04:36:45,109 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/d754437446214d328ae345c530837f02 is 1080, key is row0062/info:/1731299802380/Put/seqid=0 2024-11-11T04:36:45,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741850_1026 (size=9882) 2024-11-11T04:36:45,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/.tmp/info/05403edea31142db85a8ef41904f589e 2024-11-11T04:36:45,110 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=11 2024-11-11T04:36:45,110 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 6f92e86db399ee7ca557aad38d81d716, server=a7bef91497aa,33973,1731299779381 in 212 msec 2024-11-11T04:36:45,113 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-11T04:36:45,113 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6f92e86db399ee7ca557aad38d81d716, ASSIGN in 370 msec 2024-11-11T04:36:45,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741851_1027 (size=42984) 2024-11-11T04:36:45,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741851_1027 (size=42984) 2024-11-11T04:36:45,115 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56b1ef1e5f926a0ee91a838978a47cf4, daughterA=6f92e86db399ee7ca557aad38d81d716, daughterB=f5f10686e127082b1019007ad1efb944 in 660 msec 2024-11-11T04:36:45,116 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, 
hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56b1ef1e5f926a0ee91a838978a47cf4, daughterA=d71f76f2268b3e634ede9164e70e06a8, daughterB=0196f1d0ad26827cae1ad7dcad6e8626 2024-11-11T04:36:45,116 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56b1ef1e5f926a0ee91a838978a47cf4, daughterA=d71f76f2268b3e634ede9164e70e06a8, daughterB=0196f1d0ad26827cae1ad7dcad6e8626 2024-11-11T04:36:45,116 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56b1ef1e5f926a0ee91a838978a47cf4, daughterA=d71f76f2268b3e634ede9164e70e06a8, daughterB=0196f1d0ad26827cae1ad7dcad6e8626 2024-11-11T04:36:45,117 INFO [PEWorker-2 {}] assignment.SplitTableRegionProcedure(534): Split of {ENCODED => 56b1ef1e5f926a0ee91a838978a47cf4, NAME => 'TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4.', STARTKEY => '', ENDKEY => ''} skipped; state is already SPLIT 2024-11-11T04:36:45,118 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56b1ef1e5f926a0ee91a838978a47cf4, daughterA=d71f76f2268b3e634ede9164e70e06a8, daughterB=0196f1d0ad26827cae1ad7dcad6e8626 in 658 msec 2024-11-11T04:36:45,121 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/d754437446214d328ae345c530837f02 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/d754437446214d328ae345c530837f02 2024-11-11T04:36:45,124 INFO [RS:0;a7bef91497aa:33973-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6f92e86db399ee7ca557aad38d81d716#info#compaction#67 average throughput is 62.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T04:36:45,125 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/6f92e86db399ee7ca557aad38d81d716/.tmp/info/2ad8aee0e342428189a6c4405b8c3b84 is 1080, key is row0001/info:/1731299790251/Put/seqid=0 2024-11-11T04:36:45,127 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f5f10686e127082b1019007ad1efb944/info of f5f10686e127082b1019007ad1efb944 into d754437446214d328ae345c530837f02(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T04:36:45,127 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:36:45,127 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944., storeName=f5f10686e127082b1019007ad1efb944/info, priority=13, startTime=1731299805079; duration=0sec 2024-11-11T04:36:45,127 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:36:45,127 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f5f10686e127082b1019007ad1efb944:info 2024-11-11T04:36:45,131 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/.tmp/ns/0138c9bff2dd4f6c9757eed8601823e1 is 43, key is default/ns:d/1731299780175/Put/seqid=0 2024-11-11T04:36:45,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741852_1028 (size=70862) 2024-11-11T04:36:45,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741852_1028 (size=70862) 2024-11-11T04:36:45,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741853_1029 (size=5153) 2024-11-11T04:36:45,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741853_1029 (size=5153) 2024-11-11T04:36:45,136 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/.tmp/ns/0138c9bff2dd4f6c9757eed8601823e1 2024-11-11T04:36:45,139 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/6f92e86db399ee7ca557aad38d81d716/.tmp/info/2ad8aee0e342428189a6c4405b8c3b84 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/6f92e86db399ee7ca557aad38d81d716/info/2ad8aee0e342428189a6c4405b8c3b84 2024-11-11T04:36:45,144 INFO [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 6f92e86db399ee7ca557aad38d81d716/info of 6f92e86db399ee7ca557aad38d81d716 into 2ad8aee0e342428189a6c4405b8c3b84(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T04:36:45,144 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6f92e86db399ee7ca557aad38d81d716: 2024-11-11T04:36:45,145 INFO [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716., storeName=6f92e86db399ee7ca557aad38d81d716/info, priority=15, startTime=1731299805101; duration=0sec 2024-11-11T04:36:45,145 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:36:45,145 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6f92e86db399ee7ca557aad38d81d716:info 2024-11-11T04:36:45,154 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/.tmp/table/d10d0e9d9a314b85beb7e300d99e53bc is 65, key is TestLogRolling-testLogRolling/table:state/1731299780571/Put/seqid=0 2024-11-11T04:36:45,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741854_1030 (size=5340) 2024-11-11T04:36:45,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741854_1030 (size=5340) 2024-11-11T04:36:45,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/.tmp/table/d10d0e9d9a314b85beb7e300d99e53bc 2024-11-11T04:36:45,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/.tmp/info/05403edea31142db85a8ef41904f589e as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/info/05403edea31142db85a8ef41904f589e 2024-11-11T04:36:45,168 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/info/05403edea31142db85a8ef41904f589e, entries=30, sequenceid=17, filesize=9.7 K 2024-11-11T04:36:45,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/.tmp/ns/0138c9bff2dd4f6c9757eed8601823e1 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/ns/0138c9bff2dd4f6c9757eed8601823e1 2024-11-11T04:36:45,173 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/ns/0138c9bff2dd4f6c9757eed8601823e1, entries=2, sequenceid=17, filesize=5.0 K 2024-11-11T04:36:45,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/.tmp/table/d10d0e9d9a314b85beb7e300d99e53bc as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/table/d10d0e9d9a314b85beb7e300d99e53bc 2024-11-11T04:36:45,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/table/d10d0e9d9a314b85beb7e300d99e53bc, entries=2, sequenceid=17, filesize=5.2 K 2024-11-11T04:36:45,179 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 95ms, sequenceid=17, compaction requested=false 2024-11-11T04:36:45,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-11T04:36:45,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:45,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:46,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:57412 deadline: 1731299816438, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. is not online on a7bef91497aa,33973,1731299779381 2024-11-11T04:36:46,440 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4., hostname=a7bef91497aa,33973,1731299779381, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4., hostname=a7bef91497aa,33973,1731299779381, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. 
is not online on a7bef91497aa,33973,1731299779381 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-11T04:36:46,440 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4., hostname=a7bef91497aa,33973,1731299779381, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4. is not online on a7bef91497aa,33973,1731299779381 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-11T04:36:46,440 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731299780210.56b1ef1e5f926a0ee91a838978a47cf4., hostname=a7bef91497aa,33973,1731299779381, seqNum=2 from cache 2024-11-11T04:36:46,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:46,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:47,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:47,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:48,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:48,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:49,322 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T04:36:49,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:49,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:49,644 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:49,644 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:49,644 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:49,644 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:49,644 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:49,644 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:49,645 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:49,645 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:49,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:49,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:49,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:49,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:49,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:49,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:49,674 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:49,675 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:49,675 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:49,678 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,187 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T04:36:50,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,190 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,190 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,216 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,216 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,216 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,218 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T04:36:50,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:50,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:51,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:51,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:36:56,491 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944., hostname=a7bef91497aa,33973,1731299779381, seqNum=124] 2024-11-11T04:36:56,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:56,503 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5f10686e127082b1019007ad1efb944 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T04:36:56,507 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/bd651b3ccf354442a6e394f519a1dc43 is 1080, key is row0097/info:/1731299816492/Put/seqid=0 2024-11-11T04:36:56,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741855_1031 (size=12515) 2024-11-11T04:36:56,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741855_1031 (size=12515) 2024-11-11T04:36:56,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/bd651b3ccf354442a6e394f519a1dc43 2024-11-11T04:36:56,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/bd651b3ccf354442a6e394f519a1dc43 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/bd651b3ccf354442a6e394f519a1dc43 2024-11-11T04:36:56,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/bd651b3ccf354442a6e394f519a1dc43, entries=7, sequenceid=134, filesize=12.2 K 2024-11-11T04:36:56,525 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for f5f10686e127082b1019007ad1efb944 in 22ms, sequenceid=134, compaction requested=false 2024-11-11T04:36:56,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:36:56,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:56,526 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5f10686e127082b1019007ad1efb944 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-11T04:36:56,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/1c1f0a04a78b4e89b88aa4f3becc0700 is 1080, key is row0104/info:/1731299816504/Put/seqid=0 2024-11-11T04:36:56,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741856_1032 (size=21156) 2024-11-11T04:36:56,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741856_1032 (size=21156) 2024-11-11T04:36:56,536 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/1c1f0a04a78b4e89b88aa4f3becc0700 2024-11-11T04:36:56,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/1c1f0a04a78b4e89b88aa4f3becc0700 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/1c1f0a04a78b4e89b88aa4f3becc0700 2024-11-11T04:36:56,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/1c1f0a04a78b4e89b88aa4f3becc0700, entries=15, sequenceid=152, filesize=20.7 K 2024-11-11T04:36:56,546 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for f5f10686e127082b1019007ad1efb944 in 20ms, sequenceid=152, compaction requested=true 2024-11-11T04:36:56,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:36:56,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f5f10686e127082b1019007ad1efb944:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T04:36:56,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:36:56,546 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T04:36:56,547 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 76655 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T04:36:56,547 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1541): f5f10686e127082b1019007ad1efb944/info is initiating minor compaction (all files) 2024-11-11T04:36:56,547 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f5f10686e127082b1019007ad1efb944/info in 
TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944. 2024-11-11T04:36:56,547 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/d754437446214d328ae345c530837f02, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/bd651b3ccf354442a6e394f519a1dc43, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/1c1f0a04a78b4e89b88aa4f3becc0700] into tmpdir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp, totalSize=74.9 K 2024-11-11T04:36:56,547 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting d754437446214d328ae345c530837f02, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1731299802380 2024-11-11T04:36:56,548 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting bd651b3ccf354442a6e394f519a1dc43, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1731299816492 2024-11-11T04:36:56,548 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1c1f0a04a78b4e89b88aa4f3becc0700, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1731299816504 2024-11-11T04:36:56,558 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f5f10686e127082b1019007ad1efb944#info#compaction#72 average throughput is 58.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T04:36:56,558 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/95d6e0e2c2ff45e883eec59c0ec7fc7e is 1080, key is row0062/info:/1731299802380/Put/seqid=0 2024-11-11T04:36:56,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741857_1033 (size=66869) 2024-11-11T04:36:56,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741857_1033 (size=66869) 2024-11-11T04:36:56,569 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/95d6e0e2c2ff45e883eec59c0ec7fc7e as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/95d6e0e2c2ff45e883eec59c0ec7fc7e 2024-11-11T04:36:56,574 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f5f10686e127082b1019007ad1efb944/info of f5f10686e127082b1019007ad1efb944 into 95d6e0e2c2ff45e883eec59c0ec7fc7e(size=65.3 K), total size for store is 65.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T04:36:56,574 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:36:56,574 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944., storeName=f5f10686e127082b1019007ad1efb944/info, priority=13, startTime=1731299816546; duration=0sec 2024-11-11T04:36:56,574 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:36:56,574 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f5f10686e127082b1019007ad1efb944:info 2024-11-11T04:36:57,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:58,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:58,543 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5f10686e127082b1019007ad1efb944 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-11T04:36:58,548 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/014e6281fd35403d90a70b40d07b029d is 1080, key is row0119/info:/1731299816527/Put/seqid=0 2024-11-11T04:36:58,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741858_1034 (size=16828) 2024-11-11T04:36:58,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741858_1034 (size=16828) 2024-11-11T04:36:58,554 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/014e6281fd35403d90a70b40d07b029d 2024-11-11T04:36:58,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/014e6281fd35403d90a70b40d07b029d as 
hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/014e6281fd35403d90a70b40d07b029d 2024-11-11T04:36:58,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/014e6281fd35403d90a70b40d07b029d, entries=11, sequenceid=167, filesize=16.4 K 2024-11-11T04:36:58,566 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=14.71 KB/15064 for f5f10686e127082b1019007ad1efb944 in 23ms, sequenceid=167, compaction requested=false 2024-11-11T04:36:58,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:36:58,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:58,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5f10686e127082b1019007ad1efb944 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-11T04:36:58,570 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/f9856905813a41449f83488d78b7a266 is 1080, key is row0130/info:/1731299818544/Put/seqid=0 2024-11-11T04:36:58,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741859_1035 (size=21156) 2024-11-11T04:36:58,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741859_1035 (size=21156) 2024-11-11T04:36:58,576 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/f9856905813a41449f83488d78b7a266 2024-11-11T04:36:58,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/f9856905813a41449f83488d78b7a266 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/f9856905813a41449f83488d78b7a266 2024-11-11T04:36:58,585 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/f9856905813a41449f83488d78b7a266, entries=15, sequenceid=185, filesize=20.7 K 2024-11-11T04:36:58,586 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for f5f10686e127082b1019007ad1efb944 in 19ms, sequenceid=185, compaction requested=true 2024-11-11T04:36:58,586 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:36:58,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f5f10686e127082b1019007ad1efb944:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T04:36:58,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:36:58,586 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T04:36:58,587 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 104853 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T04:36:58,587 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.HStore(1541): f5f10686e127082b1019007ad1efb944/info is initiating minor compaction (all files) 2024-11-11T04:36:58,587 INFO [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f5f10686e127082b1019007ad1efb944/info in TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944. 2024-11-11T04:36:58,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on f5f10686e127082b1019007ad1efb944 2024-11-11T04:36:58,587 INFO [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/95d6e0e2c2ff45e883eec59c0ec7fc7e, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/014e6281fd35403d90a70b40d07b029d, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/f9856905813a41449f83488d78b7a266] into tmpdir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp, totalSize=102.4 K 2024-11-11T04:36:58,587 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5f10686e127082b1019007ad1efb944 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-11T04:36:58,588 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] compactions.Compactor(225): Compacting 95d6e0e2c2ff45e883eec59c0ec7fc7e, keycount=57, bloomtype=ROW, size=65.3 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1731299802380 2024-11-11T04:36:58,588 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] compactions.Compactor(225): Compacting 014e6281fd35403d90a70b40d07b029d, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1731299816527 2024-11-11T04:36:58,589 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] compactions.Compactor(225): Compacting f9856905813a41449f83488d78b7a266, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1731299818544 2024-11-11T04:36:58,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): 
Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/06e4f11e9e8448e493a0674dbcfb90b9 is 1080, key is row0145/info:/1731299818568/Put/seqid=0 2024-11-11T04:36:58,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741860_1036 (size=20078) 2024-11-11T04:36:58,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741860_1036 (size=20078) 2024-11-11T04:36:58,597 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/06e4f11e9e8448e493a0674dbcfb90b9 2024-11-11T04:36:58,600 INFO [RS:0;a7bef91497aa:33973-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f5f10686e127082b1019007ad1efb944#info#compaction#76 average throughput is 42.59 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T04:36:58,600 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/1015587f9d354d9fba44ba3628213951 is 1080, key is row0062/info:/1731299802380/Put/seqid=0 2024-11-11T04:36:58,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/06e4f11e9e8448e493a0674dbcfb90b9 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/06e4f11e9e8448e493a0674dbcfb90b9 2024-11-11T04:36:58,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741861_1037 (size=95076) 2024-11-11T04:36:58,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741861_1037 (size=95076) 2024-11-11T04:36:58,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/06e4f11e9e8448e493a0674dbcfb90b9, entries=14, sequenceid=202, filesize=19.6 K 2024-11-11T04:36:58,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=2.10 KB/2152 for f5f10686e127082b1019007ad1efb944 in 22ms, sequenceid=202, compaction requested=false 2024-11-11T04:36:58,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:36:58,609 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/1015587f9d354d9fba44ba3628213951 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/1015587f9d354d9fba44ba3628213951 2024-11-11T04:36:58,614 INFO [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f5f10686e127082b1019007ad1efb944/info of f5f10686e127082b1019007ad1efb944 into 1015587f9d354d9fba44ba3628213951(size=92.8 K), total size for store is 112.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T04:36:58,614 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:36:58,614 INFO [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944., storeName=f5f10686e127082b1019007ad1efb944/info, priority=13, startTime=1731299818586; duration=0sec 2024-11-11T04:36:58,614 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:36:58,614 DEBUG [RS:0;a7bef91497aa:33973-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f5f10686e127082b1019007ad1efb944:info 2024-11-11T04:36:59,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:36:59,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:37:00,262 INFO [master/a7bef91497aa:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-11T04:37:00,262 INFO [master/a7bef91497aa:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-11T04:37:00,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:37:00,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:37:00,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on f5f10686e127082b1019007ad1efb944 2024-11-11T04:37:00,599 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5f10686e127082b1019007ad1efb944 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T04:37:00,603 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/92eaa615262546dc9214db412b5979eb is 1080, key is row0159/info:/1731299818588/Put/seqid=0 2024-11-11T04:37:00,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741862_1038 (size=12516) 2024-11-11T04:37:00,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741862_1038 (size=12516) 2024-11-11T04:37:00,623 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/92eaa615262546dc9214db412b5979eb 2024-11-11T04:37:00,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/92eaa615262546dc9214db412b5979eb as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/92eaa615262546dc9214db412b5979eb 2024-11-11T04:37:00,633 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/92eaa615262546dc9214db412b5979eb, entries=7, sequenceid=213, filesize=12.2 K 2024-11-11T04:37:00,634 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for f5f10686e127082b1019007ad1efb944 in 34ms, sequenceid=213, compaction requested=true 2024-11-11T04:37:00,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:37:00,634 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f5f10686e127082b1019007ad1efb944:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T04:37:00,634 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:37:00,634 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T04:37:00,635 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 127670 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T04:37:00,635 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1541): f5f10686e127082b1019007ad1efb944/info is initiating minor compaction (all files) 2024-11-11T04:37:00,635 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f5f10686e127082b1019007ad1efb944/info in TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944. 2024-11-11T04:37:00,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on f5f10686e127082b1019007ad1efb944 2024-11-11T04:37:00,635 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/1015587f9d354d9fba44ba3628213951, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/06e4f11e9e8448e493a0674dbcfb90b9, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/92eaa615262546dc9214db412b5979eb] into tmpdir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp, totalSize=124.7 K 2024-11-11T04:37:00,635 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5f10686e127082b1019007ad1efb944 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-11T04:37:00,636 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1015587f9d354d9fba44ba3628213951, keycount=83, bloomtype=ROW, size=92.8 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1731299802380 2024-11-11T04:37:00,636 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting 06e4f11e9e8448e493a0674dbcfb90b9, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1731299818568 2024-11-11T04:37:00,637 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting 92eaa615262546dc9214db412b5979eb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731299818588 2024-11-11T04:37:00,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/d8108bf334484c839d56ac03c64367d8 is 1080, key is row0166/info:/1731299820600/Put/seqid=0 2024-11-11T04:37:00,649 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741863_1039 (size=23316) 2024-11-11T04:37:00,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741863_1039 (size=23316) 2024-11-11T04:37:00,650 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f5f10686e127082b1019007ad1efb944#info#compaction#79 average throughput is 53.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T04:37:00,651 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/3155742c0d39466c93a4488947f1aa75 is 1080, key is row0062/info:/1731299802380/Put/seqid=0 2024-11-11T04:37:00,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/d8108bf334484c839d56ac03c64367d8 2024-11-11T04:37:00,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741864_1040 (size=117820) 2024-11-11T04:37:00,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741864_1040 (size=117820) 2024-11-11T04:37:00,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/d8108bf334484c839d56ac03c64367d8 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/d8108bf334484c839d56ac03c64367d8 2024-11-11T04:37:00,660 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/3155742c0d39466c93a4488947f1aa75 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/3155742c0d39466c93a4488947f1aa75 2024-11-11T04:37:00,662 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/d8108bf334484c839d56ac03c64367d8, entries=17, sequenceid=233, filesize=22.8 K 2024-11-11T04:37:00,663 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=10.51 KB/10760 for f5f10686e127082b1019007ad1efb944 in 28ms, sequenceid=233, compaction requested=false 2024-11-11T04:37:00,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5f10686e127082b1019007ad1efb944: 
2024-11-11T04:37:00,665 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f5f10686e127082b1019007ad1efb944/info of f5f10686e127082b1019007ad1efb944 into 3155742c0d39466c93a4488947f1aa75(size=115.1 K), total size for store is 137.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T04:37:00,665 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:37:00,665 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944., storeName=f5f10686e127082b1019007ad1efb944/info, priority=13, startTime=1731299820634; duration=0sec 2024-11-11T04:37:00,665 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:37:00,665 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f5f10686e127082b1019007ad1efb944:info 2024-11-11T04:37:01,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:37:01,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:37:02,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:37:02,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:37:02,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on f5f10686e127082b1019007ad1efb944 2024-11-11T04:37:02,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5f10686e127082b1019007ad1efb944 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-11T04:37:02,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/ff0aa1c5f02747209e9617dedc7d3343 is 1080, key is row0183/info:/1731299820636/Put/seqid=0 2024-11-11T04:37:02,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741865_1041 (size=16828) 2024-11-11T04:37:02,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741865_1041 (size=16828) 2024-11-11T04:37:02,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/ff0aa1c5f02747209e9617dedc7d3343 2024-11-11T04:37:02,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/ff0aa1c5f02747209e9617dedc7d3343 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/ff0aa1c5f02747209e9617dedc7d3343 2024-11-11T04:37:02,677 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/ff0aa1c5f02747209e9617dedc7d3343, entries=11, sequenceid=248, filesize=16.4 K 2024-11-11T04:37:02,678 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=14.71 KB/15064 for f5f10686e127082b1019007ad1efb944 in 24ms, sequenceid=248, compaction requested=true 2024-11-11T04:37:02,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:37:02,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f5f10686e127082b1019007ad1efb944:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T04:37:02,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:37:02,678 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T04:37:02,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on f5f10686e127082b1019007ad1efb944 2024-11-11T04:37:02,679 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5f10686e127082b1019007ad1efb944 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-11T04:37:02,679 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 157964 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T04:37:02,679 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1541): f5f10686e127082b1019007ad1efb944/info is initiating minor compaction (all files) 2024-11-11T04:37:02,679 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f5f10686e127082b1019007ad1efb944/info in TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944. 2024-11-11T04:37:02,679 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/3155742c0d39466c93a4488947f1aa75, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/d8108bf334484c839d56ac03c64367d8, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/ff0aa1c5f02747209e9617dedc7d3343] into tmpdir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp, totalSize=154.3 K 2024-11-11T04:37:02,680 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3155742c0d39466c93a4488947f1aa75, keycount=104, bloomtype=ROW, size=115.1 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731299802380 2024-11-11T04:37:02,680 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting d8108bf334484c839d56ac03c64367d8, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1731299820600 2024-11-11T04:37:02,681 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting ff0aa1c5f02747209e9617dedc7d3343, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1731299820636 2024-11-11T04:37:02,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/d72b13c1b22c45f19adfba78bfcc13cf is 1080, key is row0194/info:/1731299822655/Put/seqid=0 2024-11-11T04:37:02,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is 
added to blk_1073741866_1042 (size=21165) 2024-11-11T04:37:02,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741866_1042 (size=21165) 2024-11-11T04:37:02,690 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=266 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/d72b13c1b22c45f19adfba78bfcc13cf 2024-11-11T04:37:02,694 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f5f10686e127082b1019007ad1efb944#info#compaction#82 average throughput is 45.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T04:37:02,694 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/8145f7f2febd414eae6f932ae93c106b is 1080, key is row0062/info:/1731299802380/Put/seqid=0 2024-11-11T04:37:02,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/d72b13c1b22c45f19adfba78bfcc13cf as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/d72b13c1b22c45f19adfba78bfcc13cf 2024-11-11T04:37:02,700 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/d72b13c1b22c45f19adfba78bfcc13cf, entries=15, sequenceid=266, filesize=20.7 K 2024-11-11T04:37:02,701 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for f5f10686e127082b1019007ad1efb944 in 22ms, sequenceid=266, compaction requested=false 2024-11-11T04:37:02,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:37:02,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on f5f10686e127082b1019007ad1efb944 2024-11-11T04:37:02,701 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5f10686e127082b1019007ad1efb944 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-11T04:37:02,705 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/3f145e9c810145f79fbf78dec9feebef is 1080, key is row0209/info:/1731299822680/Put/seqid=0 2024-11-11T04:37:02,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741867_1043 (size=148311) 2024-11-11T04:37:02,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38995 is added to blk_1073741868_1044 (size=19013) 2024-11-11T04:37:02,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741867_1043 (size=148311) 2024-11-11T04:37:02,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741868_1044 (size=19013) 2024-11-11T04:37:02,716 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/3f145e9c810145f79fbf78dec9feebef 2024-11-11T04:37:02,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/3f145e9c810145f79fbf78dec9feebef as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/3f145e9c810145f79fbf78dec9feebef 2024-11-11T04:37:02,721 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/8145f7f2febd414eae6f932ae93c106b as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/8145f7f2febd414eae6f932ae93c106b 2024-11-11T04:37:02,725 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/3f145e9c810145f79fbf78dec9feebef, entries=13, sequenceid=282, filesize=18.6 K 2024-11-11T04:37:02,726 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=3.15 KB/3228 for f5f10686e127082b1019007ad1efb944 in 25ms, sequenceid=282, compaction requested=false 2024-11-11T04:37:02,726 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:37:02,726 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f5f10686e127082b1019007ad1efb944/info of f5f10686e127082b1019007ad1efb944 into 8145f7f2febd414eae6f932ae93c106b(size=144.8 K), total size for store is 184.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T04:37:02,726 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:37:02,726 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944., storeName=f5f10686e127082b1019007ad1efb944/info, priority=13, startTime=1731299822678; duration=0sec 2024-11-11T04:37:02,726 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:37:02,726 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f5f10686e127082b1019007ad1efb944:info 2024-11-11T04:37:03,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:37:03,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:37:04,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:37:04,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:37:04,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on f5f10686e127082b1019007ad1efb944 2024-11-11T04:37:04,713 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5f10686e127082b1019007ad1efb944 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T04:37:04,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/62a4ede366624bbd98336d76a6df7583 is 1080, key is row0222/info:/1731299822702/Put/seqid=0 2024-11-11T04:37:04,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741869_1045 (size=12523) 2024-11-11T04:37:04,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741869_1045 (size=12523) 2024-11-11T04:37:04,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/62a4ede366624bbd98336d76a6df7583 2024-11-11T04:37:04,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/62a4ede366624bbd98336d76a6df7583 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/62a4ede366624bbd98336d76a6df7583 2024-11-11T04:37:04,735 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/62a4ede366624bbd98336d76a6df7583, entries=7, sequenceid=293, filesize=12.2 K 2024-11-11T04:37:04,736 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for f5f10686e127082b1019007ad1efb944 in 24ms, sequenceid=293, compaction requested=true 2024-11-11T04:37:04,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:37:04,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f5f10686e127082b1019007ad1efb944:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T04:37:04,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:37:04,736 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T04:37:04,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on f5f10686e127082b1019007ad1efb944 2024-11-11T04:37:04,736 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing f5f10686e127082b1019007ad1efb944 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-11T04:37:04,738 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 201012 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T04:37:04,738 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1541): f5f10686e127082b1019007ad1efb944/info is initiating minor compaction (all files) 2024-11-11T04:37:04,738 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f5f10686e127082b1019007ad1efb944/info in TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944. 2024-11-11T04:37:04,738 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/8145f7f2febd414eae6f932ae93c106b, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/d72b13c1b22c45f19adfba78bfcc13cf, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/3f145e9c810145f79fbf78dec9feebef, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/62a4ede366624bbd98336d76a6df7583] into tmpdir=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp, totalSize=196.3 K 2024-11-11T04:37:04,739 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8145f7f2febd414eae6f932ae93c106b, keycount=132, bloomtype=ROW, size=144.8 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1731299802380 2024-11-11T04:37:04,739 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting d72b13c1b22c45f19adfba78bfcc13cf, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1731299822655 2024-11-11T04:37:04,740 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3f145e9c810145f79fbf78dec9feebef, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1731299822680 2024-11-11T04:37:04,740 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting 62a4ede366624bbd98336d76a6df7583, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1731299822702 2024-11-11T04:37:04,741 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/1da2d4c3f97f460e996075ef7f534669 is 1080, key is row0229/info:/1731299824713/Put/seqid=0 2024-11-11T04:37:04,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741870_1046 (size=21171) 
2024-11-11T04:37:04,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741870_1046 (size=21171) 2024-11-11T04:37:04,746 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/1da2d4c3f97f460e996075ef7f534669 2024-11-11T04:37:04,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/1da2d4c3f97f460e996075ef7f534669 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/1da2d4c3f97f460e996075ef7f534669 2024-11-11T04:37:04,757 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f5f10686e127082b1019007ad1efb944#info#compaction#86 average throughput is 42.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T04:37:04,758 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/27327c9bbc7842578792fc63cf3f2b6f is 1080, key is row0062/info:/1731299802380/Put/seqid=0 2024-11-11T04:37:04,759 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/1da2d4c3f97f460e996075ef7f534669, entries=15, sequenceid=311, filesize=20.7 K 2024-11-11T04:37:04,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=13.66 KB/13988 for f5f10686e127082b1019007ad1efb944 in 24ms, sequenceid=311, compaction requested=false 2024-11-11T04:37:04,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:37:04,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741871_1047 (size=186214) 2024-11-11T04:37:04,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741871_1047 (size=186214) 2024-11-11T04:37:04,768 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/27327c9bbc7842578792fc63cf3f2b6f as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/27327c9bbc7842578792fc63cf3f2b6f 2024-11-11T04:37:04,774 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 
f5f10686e127082b1019007ad1efb944/info of f5f10686e127082b1019007ad1efb944 into 27327c9bbc7842578792fc63cf3f2b6f(size=181.8 K), total size for store is 202.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T04:37:04,774 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:37:04,774 INFO [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944., storeName=f5f10686e127082b1019007ad1efb944/info, priority=12, startTime=1731299824736; duration=0sec 2024-11-11T04:37:04,774 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T04:37:04,774 DEBUG [RS:0;a7bef91497aa:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f5f10686e127082b1019007ad1efb944:info 2024-11-11T04:37:05,147 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-11T04:37:05,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:37:05,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:37:06,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:37:06,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:37:06,760 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-11T04:37:06,760 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C33973%2C1731299779381.1731299826760 2024-11-11T04:37:06,766 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:06,766 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:06,766 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:06,766 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:06,766 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:06,766 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/WALs/a7bef91497aa,33973,1731299779381/a7bef91497aa%2C33973%2C1731299779381.1731299779761 with entries=307, filesize=306.71 KB; new WAL /user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/WALs/a7bef91497aa,33973,1731299779381/a7bef91497aa%2C33973%2C1731299779381.1731299826760 2024-11-11T04:37:06,767 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36417:36417),(127.0.0.1/127.0.0.1:45445:45445)] 2024-11-11T04:37:06,767 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/WALs/a7bef91497aa,33973,1731299779381/a7bef91497aa%2C33973%2C1731299779381.1731299779761 is not closed yet, will try archiving it next time 2024-11-11T04:37:06,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741833_1009 (size=314074) 2024-11-11T04:37:06,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741833_1009 (size=314074) 2024-11-11T04:37:06,771 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 6f92e86db399ee7ca557aad38d81d716: 2024-11-11T04:37:06,771 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-11T04:37:06,775 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/.tmp/info/9e4cb277e1974563b86eab1bc13fa1a9 is 186, key is TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716./info:regioninfo/1731299805104/Put/seqid=0 2024-11-11T04:37:06,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741873_1049 (size=6153) 2024-11-11T04:37:06,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741873_1049 (size=6153) 2024-11-11T04:37:06,781 INFO [Time-limited 
test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/.tmp/info/9e4cb277e1974563b86eab1bc13fa1a9 2024-11-11T04:37:06,786 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/.tmp/info/9e4cb277e1974563b86eab1bc13fa1a9 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/info/9e4cb277e1974563b86eab1bc13fa1a9 2024-11-11T04:37:06,790 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/info/9e4cb277e1974563b86eab1bc13fa1a9, entries=5, sequenceid=21, filesize=6.0 K 2024-11-11T04:37:06,791 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 20ms, sequenceid=21, compaction requested=false 2024-11-11T04:37:06,792 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-11T04:37:06,792 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing f5f10686e127082b1019007ad1efb944 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-11T04:37:06,795 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/8e6e6bb47bd24d48bbc1b1bb2b2000fa is 1080, key is row0244/info:/1731299824737/Put/seqid=0 2024-11-11T04:37:06,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741874_1050 (size=19013) 2024-11-11T04:37:06,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741874_1050 (size=19013) 2024-11-11T04:37:06,800 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/8e6e6bb47bd24d48bbc1b1bb2b2000fa 2024-11-11T04:37:06,804 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/.tmp/info/8e6e6bb47bd24d48bbc1b1bb2b2000fa as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/8e6e6bb47bd24d48bbc1b1bb2b2000fa 2024-11-11T04:37:06,808 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/8e6e6bb47bd24d48bbc1b1bb2b2000fa, entries=13, sequenceid=328, filesize=18.6 K 2024-11-11T04:37:06,809 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, 
heapSize ~14.86 KB/15216, currentSize=0 B/0 for f5f10686e127082b1019007ad1efb944 in 17ms, sequenceid=328, compaction requested=true 2024-11-11T04:37:06,809 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for f5f10686e127082b1019007ad1efb944: 2024-11-11T04:37:06,809 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C33973%2C1731299779381.1731299826809 2024-11-11T04:37:06,813 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:06,814 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:06,814 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:06,814 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:06,814 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:06,814 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/WALs/a7bef91497aa,33973,1731299779381/a7bef91497aa%2C33973%2C1731299779381.1731299826760 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/WALs/a7bef91497aa,33973,1731299779381/a7bef91497aa%2C33973%2C1731299779381.1731299826809 2024-11-11T04:37:06,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741872_1048 (size=731) 2024-11-11T04:37:06,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741872_1048 (size=731) 2024-11-11T04:37:06,817 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/WALs/a7bef91497aa,33973,1731299779381/a7bef91497aa%2C33973%2C1731299779381.1731299779761 to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/oldWALs/a7bef91497aa%2C33973%2C1731299779381.1731299779761 2024-11-11T04:37:06,817 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36417:36417),(127.0.0.1/127.0.0.1:45445:45445)] 2024-11-11T04:37:06,818 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T04:37:06,818 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/WALs/a7bef91497aa,33973,1731299779381/a7bef91497aa%2C33973%2C1731299779381.1731299826760 to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/oldWALs/a7bef91497aa%2C33973%2C1731299779381.1731299826760 2024-11-11T04:37:06,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T04:37:06,818 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-11T04:37:06,818 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:37:06,819 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:37:06,819 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:37:06,819 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-11T04:37:06,819 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T04:37:06,819 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=17255169, stopped=false 2024-11-11T04:37:06,819 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a7bef91497aa,43893,1731299779338 2024-11-11T04:37:06,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:37:06,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:06,821 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T04:37:06,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:37:06,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:06,821 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T04:37:06,821 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:37:06,821 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:37:06,821 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:37:06,821 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:37:06,822 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a7bef91497aa,33973,1731299779381' ***** 2024-11-11T04:37:06,822 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T04:37:06,822 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T04:37:06,822 INFO [RS:0;a7bef91497aa:33973 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T04:37:06,822 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T04:37:06,822 INFO [RS:0;a7bef91497aa:33973 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-11T04:37:06,822 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer(3091): Received CLOSE for 6f92e86db399ee7ca557aad38d81d716 2024-11-11T04:37:06,822 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer(3091): Received CLOSE for f5f10686e127082b1019007ad1efb944 2024-11-11T04:37:06,822 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer(959): stopping server a7bef91497aa,33973,1731299779381 2024-11-11T04:37:06,822 INFO [RS:0;a7bef91497aa:33973 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T04:37:06,822 INFO [RS:0;a7bef91497aa:33973 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a7bef91497aa:33973. 2024-11-11T04:37:06,822 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6f92e86db399ee7ca557aad38d81d716, disabling compactions & flushes 2024-11-11T04:37:06,822 DEBUG [RS:0;a7bef91497aa:33973 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:37:06,822 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716. 2024-11-11T04:37:06,822 DEBUG [RS:0;a7bef91497aa:33973 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:37:06,822 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716. 2024-11-11T04:37:06,823 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716. after waiting 0 ms 2024-11-11T04:37:06,823 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716. 2024-11-11T04:37:06,823 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-11T04:37:06,823 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T04:37:06,823 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T04:37:06,823 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T04:37:06,823 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-11T04:37:06,823 DEBUG [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer(1325): Online Regions={6f92e86db399ee7ca557aad38d81d716=TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716., 1588230740=hbase:meta,,1.1588230740, f5f10686e127082b1019007ad1efb944=TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.} 2024-11-11T04:37:06,823 DEBUG [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6f92e86db399ee7ca557aad38d81d716, f5f10686e127082b1019007ad1efb944 2024-11-11T04:37:06,823 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/6f92e86db399ee7ca557aad38d81d716/info/a43b6c0c26664e50bb4d5f761e1f2786.56b1ef1e5f926a0ee91a838978a47cf4->hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/a43b6c0c26664e50bb4d5f761e1f2786-bottom] to archive 2024-11-11T04:37:06,823 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:37:06,823 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T04:37:06,823 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T04:37:06,823 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:37:06,823 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:37:06,824 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-11T04:37:06,825 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/6f92e86db399ee7ca557aad38d81d716/info/a43b6c0c26664e50bb4d5f761e1f2786.56b1ef1e5f926a0ee91a838978a47cf4 to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/6f92e86db399ee7ca557aad38d81d716/info/a43b6c0c26664e50bb4d5f761e1f2786.56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:37:06,825 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=a7bef91497aa:43893 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-11T04:37:06,825 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-11T04:37:06,831 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/6f92e86db399ee7ca557aad38d81d716/recovered.edits/128.seqid, newMaxSeqId=128, maxSeqId=123 2024-11-11T04:37:06,831 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-11T04:37:06,832 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T04:37:06,832 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T04:37:06,832 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716. 
2024-11-11T04:37:06,832 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731299826823Running coprocessor pre-close hooks at 1731299826823Disabling compacts and flushes for region at 1731299826823Disabling writes for close at 1731299826823Writing region close event to WAL at 1731299826828 (+5 ms)Running coprocessor post-close hooks at 1731299826832 (+4 ms)Closed at 1731299826832 2024-11-11T04:37:06,832 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6f92e86db399ee7ca557aad38d81d716: Waiting for close lock at 1731299826822Running coprocessor pre-close hooks at 1731299826822Disabling compacts and flushes for region at 1731299826822Disabling writes for close at 1731299826823 (+1 ms)Writing region close event to WAL at 1731299826828 (+5 ms)Running coprocessor post-close hooks at 1731299826832 (+4 ms)Closed at 1731299826832 2024-11-11T04:37:06,832 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731299804453.6f92e86db399ee7ca557aad38d81d716. 2024-11-11T04:37:06,832 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T04:37:06,832 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f5f10686e127082b1019007ad1efb944, disabling compactions & flushes 2024-11-11T04:37:06,832 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944. 2024-11-11T04:37:06,832 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944. 2024-11-11T04:37:06,832 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944. after waiting 0 ms 2024-11-11T04:37:06,832 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944. 
2024-11-11T04:37:06,833 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/a43b6c0c26664e50bb4d5f761e1f2786.56b1ef1e5f926a0ee91a838978a47cf4->hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/56b1ef1e5f926a0ee91a838978a47cf4/info/a43b6c0c26664e50bb4d5f761e1f2786-top, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/TestLogRolling-testLogRolling=56b1ef1e5f926a0ee91a838978a47cf4-54d6c5a866764548a75de064e45c94bf, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/d754437446214d328ae345c530837f02, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/TestLogRolling-testLogRolling=56b1ef1e5f926a0ee91a838978a47cf4-32fba6b5280745258deefbccfed62f47, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/bd651b3ccf354442a6e394f519a1dc43, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/95d6e0e2c2ff45e883eec59c0ec7fc7e, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/1c1f0a04a78b4e89b88aa4f3becc0700, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/014e6281fd35403d90a70b40d07b029d, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/1015587f9d354d9fba44ba3628213951, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/f9856905813a41449f83488d78b7a266, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/06e4f11e9e8448e493a0674dbcfb90b9, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/3155742c0d39466c93a4488947f1aa75, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/92eaa615262546dc9214db412b5979eb, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/d8108bf334484c839d56ac03c64367d8, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/8145f7f2febd414eae6f932ae93c106b, 
hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/ff0aa1c5f02747209e9617dedc7d3343, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/d72b13c1b22c45f19adfba78bfcc13cf, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/3f145e9c810145f79fbf78dec9feebef, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/62a4ede366624bbd98336d76a6df7583] to archive 2024-11-11T04:37:06,834 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T04:37:06,835 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/a43b6c0c26664e50bb4d5f761e1f2786.56b1ef1e5f926a0ee91a838978a47cf4 to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/a43b6c0c26664e50bb4d5f761e1f2786.56b1ef1e5f926a0ee91a838978a47cf4 2024-11-11T04:37:06,836 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/TestLogRolling-testLogRolling=56b1ef1e5f926a0ee91a838978a47cf4-54d6c5a866764548a75de064e45c94bf to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/TestLogRolling-testLogRolling=56b1ef1e5f926a0ee91a838978a47cf4-54d6c5a866764548a75de064e45c94bf 2024-11-11T04:37:06,837 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/d754437446214d328ae345c530837f02 to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/d754437446214d328ae345c530837f02 2024-11-11T04:37:06,838 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/TestLogRolling-testLogRolling=56b1ef1e5f926a0ee91a838978a47cf4-32fba6b5280745258deefbccfed62f47 to 
hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/TestLogRolling-testLogRolling=56b1ef1e5f926a0ee91a838978a47cf4-32fba6b5280745258deefbccfed62f47 2024-11-11T04:37:06,839 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/bd651b3ccf354442a6e394f519a1dc43 to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/bd651b3ccf354442a6e394f519a1dc43 2024-11-11T04:37:06,840 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/95d6e0e2c2ff45e883eec59c0ec7fc7e to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/95d6e0e2c2ff45e883eec59c0ec7fc7e 2024-11-11T04:37:06,841 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/1c1f0a04a78b4e89b88aa4f3becc0700 to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/1c1f0a04a78b4e89b88aa4f3becc0700 2024-11-11T04:37:06,842 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/014e6281fd35403d90a70b40d07b029d to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/014e6281fd35403d90a70b40d07b029d 2024-11-11T04:37:06,843 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/1015587f9d354d9fba44ba3628213951 to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/1015587f9d354d9fba44ba3628213951 2024-11-11T04:37:06,844 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/f9856905813a41449f83488d78b7a266 to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/f9856905813a41449f83488d78b7a266 2024-11-11T04:37:06,845 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/06e4f11e9e8448e493a0674dbcfb90b9 to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/06e4f11e9e8448e493a0674dbcfb90b9 2024-11-11T04:37:06,846 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/3155742c0d39466c93a4488947f1aa75 to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/3155742c0d39466c93a4488947f1aa75 2024-11-11T04:37:06,847 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/92eaa615262546dc9214db412b5979eb to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/92eaa615262546dc9214db412b5979eb 2024-11-11T04:37:06,848 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/d8108bf334484c839d56ac03c64367d8 to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/d8108bf334484c839d56ac03c64367d8 2024-11-11T04:37:06,849 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/8145f7f2febd414eae6f932ae93c106b to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/8145f7f2febd414eae6f932ae93c106b 2024-11-11T04:37:06,850 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/ff0aa1c5f02747209e9617dedc7d3343 to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/ff0aa1c5f02747209e9617dedc7d3343 2024-11-11T04:37:06,851 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/d72b13c1b22c45f19adfba78bfcc13cf to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/d72b13c1b22c45f19adfba78bfcc13cf 2024-11-11T04:37:06,851 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/3f145e9c810145f79fbf78dec9feebef to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/3f145e9c810145f79fbf78dec9feebef 2024-11-11T04:37:06,853 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/62a4ede366624bbd98336d76a6df7583 to hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/archive/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/info/62a4ede366624bbd98336d76a6df7583 2024-11-11T04:37:06,853 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [d754437446214d328ae345c530837f02=42984, bd651b3ccf354442a6e394f519a1dc43=12515, 95d6e0e2c2ff45e883eec59c0ec7fc7e=66869, 1c1f0a04a78b4e89b88aa4f3becc0700=21156, 014e6281fd35403d90a70b40d07b029d=16828, 1015587f9d354d9fba44ba3628213951=95076, f9856905813a41449f83488d78b7a266=21156, 06e4f11e9e8448e493a0674dbcfb90b9=20078, 3155742c0d39466c93a4488947f1aa75=117820, 92eaa615262546dc9214db412b5979eb=12516, d8108bf334484c839d56ac03c64367d8=23316, 8145f7f2febd414eae6f932ae93c106b=148311, ff0aa1c5f02747209e9617dedc7d3343=16828, d72b13c1b22c45f19adfba78bfcc13cf=21165, 3f145e9c810145f79fbf78dec9feebef=19013, 62a4ede366624bbd98336d76a6df7583=12523] 2024-11-11T04:37:06,856 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/data/default/TestLogRolling-testLogRolling/f5f10686e127082b1019007ad1efb944/recovered.edits/331.seqid, newMaxSeqId=331, maxSeqId=123 2024-11-11T04:37:06,857 INFO [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944. 2024-11-11T04:37:06,857 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f5f10686e127082b1019007ad1efb944: Waiting for close lock at 1731299826832Running coprocessor pre-close hooks at 1731299826832Disabling compacts and flushes for region at 1731299826832Disabling writes for close at 1731299826832Writing region close event to WAL at 1731299826853 (+21 ms)Running coprocessor post-close hooks at 1731299826857 (+4 ms)Closed at 1731299826857 2024-11-11T04:37:06,857 DEBUG [RS_CLOSE_REGION-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731299804453.f5f10686e127082b1019007ad1efb944. 2024-11-11T04:37:07,023 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer(976): stopping server a7bef91497aa,33973,1731299779381; all regions closed. 2024-11-11T04:37:07,024 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:07,024 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:07,024 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:07,024 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:07,024 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:07,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741834_1010 (size=8107) 2024-11-11T04:37:07,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741834_1010 (size=8107) 2024-11-11T04:37:07,028 DEBUG [RS:0;a7bef91497aa:33973 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/oldWALs 2024-11-11T04:37:07,028 INFO [RS:0;a7bef91497aa:33973 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7bef91497aa%2C33973%2C1731299779381.meta:.meta(num 1731299780134) 2024-11-11T04:37:07,028 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:07,028 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:07,029 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:07,029 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:07,029 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:07,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741875_1051 (size=779) 2024-11-11T04:37:07,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741875_1051 (size=779) 2024-11-11T04:37:07,032 DEBUG [RS:0;a7bef91497aa:33973 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/oldWALs 2024-11-11T04:37:07,032 INFO [RS:0;a7bef91497aa:33973 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7bef91497aa%2C33973%2C1731299779381:(num 1731299826809) 2024-11-11T04:37:07,032 DEBUG [RS:0;a7bef91497aa:33973 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:37:07,032 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:37:07,032 INFO [RS:0;a7bef91497aa:33973 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T04:37:07,032 INFO [RS:0;a7bef91497aa:33973 {}] hbase.ChoreService(370): Chore service for: regionserver/a7bef91497aa:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T04:37:07,032 INFO [RS:0;a7bef91497aa:33973 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T04:37:07,033 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T04:37:07,033 INFO [RS:0;a7bef91497aa:33973 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33973 2024-11-11T04:37:07,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a7bef91497aa,33973,1731299779381 2024-11-11T04:37:07,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:37:07,035 INFO [RS:0;a7bef91497aa:33973 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T04:37:07,036 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a7bef91497aa,33973,1731299779381] 2024-11-11T04:37:07,037 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a7bef91497aa,33973,1731299779381 already deleted, retry=false 2024-11-11T04:37:07,037 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a7bef91497aa,33973,1731299779381 expired; onlineServers=0 2024-11-11T04:37:07,037 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a7bef91497aa,43893,1731299779338' ***** 2024-11-11T04:37:07,037 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T04:37:07,037 INFO [M:0;a7bef91497aa:43893 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T04:37:07,037 INFO [M:0;a7bef91497aa:43893 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T04:37:07,037 DEBUG [M:0;a7bef91497aa:43893 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T04:37:07,037 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T04:37:07,037 DEBUG [M:0;a7bef91497aa:43893 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T04:37:07,037 DEBUG [master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299779535 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299779535,5,FailOnTimeoutGroup] 2024-11-11T04:37:07,037 DEBUG [master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299779536 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299779536,5,FailOnTimeoutGroup] 2024-11-11T04:37:07,037 INFO [M:0;a7bef91497aa:43893 {}] hbase.ChoreService(370): Chore service for: master/a7bef91497aa:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T04:37:07,038 INFO [M:0;a7bef91497aa:43893 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T04:37:07,038 DEBUG [M:0;a7bef91497aa:43893 {}] master.HMaster(1795): Stopping service threads 2024-11-11T04:37:07,038 INFO [M:0;a7bef91497aa:43893 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T04:37:07,038 INFO [M:0;a7bef91497aa:43893 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T04:37:07,038 ERROR [M:0;a7bef91497aa:43893 {}] procedure2.ProcedureExecutor(763): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:36477,5,PEWorkerGroup] 2024-11-11T04:37:07,038 INFO [M:0;a7bef91497aa:43893 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T04:37:07,038 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-11T04:37:07,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T04:37:07,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:07,039 DEBUG [M:0;a7bef91497aa:43893 {}] zookeeper.ZKUtil(347): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T04:37:07,039 WARN [M:0;a7bef91497aa:43893 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T04:37:07,039 INFO [M:0;a7bef91497aa:43893 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/.lastflushedseqids 2024-11-11T04:37:07,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741876_1052 (size=228) 2024-11-11T04:37:07,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741876_1052 (size=228) 2024-11-11T04:37:07,046 INFO [M:0;a7bef91497aa:43893 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T04:37:07,046 INFO [M:0;a7bef91497aa:43893 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T04:37:07,046 DEBUG [M:0;a7bef91497aa:43893 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T04:37:07,046 INFO [M:0;a7bef91497aa:43893 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:37:07,046 DEBUG [M:0;a7bef91497aa:43893 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:37:07,046 DEBUG [M:0;a7bef91497aa:43893 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T04:37:07,046 DEBUG [M:0;a7bef91497aa:43893 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T04:37:07,046 INFO [M:0;a7bef91497aa:43893 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=53.68 KB heapSize=65.90 KB 2024-11-11T04:37:07,062 DEBUG [M:0;a7bef91497aa:43893 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8ad2755696a54940b1762a56752d7648 is 82, key is hbase:meta,,1/info:regioninfo/1731299780161/Put/seqid=0 2024-11-11T04:37:07,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741877_1053 (size=5672) 2024-11-11T04:37:07,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741877_1053 (size=5672) 2024-11-11T04:37:07,066 INFO [M:0;a7bef91497aa:43893 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8ad2755696a54940b1762a56752d7648 2024-11-11T04:37:07,085 DEBUG [M:0;a7bef91497aa:43893 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cd472f71091b474fb45d87b06d4f671e is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731299780575/Put/seqid=0 2024-11-11T04:37:07,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741878_1054 (size=7679) 2024-11-11T04:37:07,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741878_1054 (size=7679) 2024-11-11T04:37:07,090 INFO [M:0;a7bef91497aa:43893 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.08 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cd472f71091b474fb45d87b06d4f671e 2024-11-11T04:37:07,094 INFO [M:0;a7bef91497aa:43893 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cd472f71091b474fb45d87b06d4f671e 2024-11-11T04:37:07,108 DEBUG [M:0;a7bef91497aa:43893 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8da4cde9a0314adf83ceab761e0d85b7 is 69, key is a7bef91497aa,33973,1731299779381/rs:state/1731299779617/Put/seqid=0 2024-11-11T04:37:07,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741879_1055 (size=5156) 2024-11-11T04:37:07,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741879_1055 (size=5156) 2024-11-11T04:37:07,112 INFO [M:0;a7bef91497aa:43893 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=129 (bloomFilter=true), 
to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8da4cde9a0314adf83ceab761e0d85b7 2024-11-11T04:37:07,129 DEBUG [M:0;a7bef91497aa:43893 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/093846c4316240c6aeee944167491357 is 52, key is load_balancer_on/state:d/1731299780207/Put/seqid=0 2024-11-11T04:37:07,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741880_1056 (size=5056) 2024-11-11T04:37:07,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741880_1056 (size=5056) 2024-11-11T04:37:07,134 INFO [M:0;a7bef91497aa:43893 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/093846c4316240c6aeee944167491357 2024-11-11T04:37:07,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:37:07,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101959d98640001, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:37:07,136 INFO [RS:0;a7bef91497aa:33973 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T04:37:07,136 INFO [RS:0;a7bef91497aa:33973 {}] regionserver.HRegionServer(1031): Exiting; stopping=a7bef91497aa,33973,1731299779381; zookeeper connection closed. 
2024-11-11T04:37:07,136 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@566de43b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@566de43b 2024-11-11T04:37:07,136 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T04:37:07,139 DEBUG [M:0;a7bef91497aa:43893 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8ad2755696a54940b1762a56752d7648 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8ad2755696a54940b1762a56752d7648 2024-11-11T04:37:07,143 INFO [M:0;a7bef91497aa:43893 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8ad2755696a54940b1762a56752d7648, entries=8, sequenceid=129, filesize=5.5 K 2024-11-11T04:37:07,144 DEBUG [M:0;a7bef91497aa:43893 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cd472f71091b474fb45d87b06d4f671e as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cd472f71091b474fb45d87b06d4f671e 2024-11-11T04:37:07,149 INFO [M:0;a7bef91497aa:43893 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cd472f71091b474fb45d87b06d4f671e 2024-11-11T04:37:07,149 INFO [M:0;a7bef91497aa:43893 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cd472f71091b474fb45d87b06d4f671e, entries=14, sequenceid=129, filesize=7.5 K 2024-11-11T04:37:07,149 DEBUG [M:0;a7bef91497aa:43893 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8da4cde9a0314adf83ceab761e0d85b7 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8da4cde9a0314adf83ceab761e0d85b7 2024-11-11T04:37:07,153 INFO [M:0;a7bef91497aa:43893 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8da4cde9a0314adf83ceab761e0d85b7, entries=1, sequenceid=129, filesize=5.0 K 2024-11-11T04:37:07,154 DEBUG [M:0;a7bef91497aa:43893 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/093846c4316240c6aeee944167491357 as hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/093846c4316240c6aeee944167491357 2024-11-11T04:37:07,157 INFO [M:0;a7bef91497aa:43893 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36477/user/jenkins/test-data/ecf9a1b2-9b36-8b20-760c-6803f96bd9cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/093846c4316240c6aeee944167491357, entries=1, sequenceid=129, filesize=4.9 K 2024-11-11T04:37:07,158 INFO [M:0;a7bef91497aa:43893 {}] regionserver.HRegion(3140): Finished flush of dataSize ~53.68 KB/54973, heapSize ~65.84 KB/67416, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=129, compaction requested=false 2024-11-11T04:37:07,160 INFO [M:0;a7bef91497aa:43893 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:37:07,160 DEBUG [M:0;a7bef91497aa:43893 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731299827046Disabling compacts and flushes for region at 1731299827046Disabling writes for close at 1731299827046Obtaining lock to block concurrent updates at 1731299827046Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731299827046Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=54973, getHeapSize=67416, getOffHeapSize=0, getCellsCount=152 at 1731299827047 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731299827047Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731299827047Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731299827061 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731299827061Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731299827071 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731299827085 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731299827085Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731299827094 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731299827107 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731299827107Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731299827116 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731299827128 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731299827128Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f389d33: reopening flushed file at 1731299827138 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3119119e: reopening flushed file at 1731299827144 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11bee5ba: reopening flushed file at 1731299827149 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b398ffe: reopening flushed file at 1731299827153 (+4 ms)Finished flush of dataSize ~53.68 KB/54973, heapSize ~65.84 KB/67416, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=129, compaction requested=false at 1731299827158 (+5 ms)Writing region close event to WAL at 1731299827160 (+2 ms)Closed at 1731299827160 2024-11-11T04:37:07,160 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:07,160 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:07,160 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:07,160 INFO [sync.3 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:07,160 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:07,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46475 is added to blk_1073741830_1006 (size=63903) 2024-11-11T04:37:07,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38995 is added to blk_1073741830_1006 (size=63903) 2024-11-11T04:37:07,163 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T04:37:07,163 INFO [M:0;a7bef91497aa:43893 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T04:37:07,163 INFO [M:0;a7bef91497aa:43893 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43893 2024-11-11T04:37:07,164 INFO [M:0;a7bef91497aa:43893 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T04:37:07,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:37:07,265 INFO [M:0;a7bef91497aa:43893 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T04:37:07,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43893-0x101959d98640000, quorum=127.0.0.1:55813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:37:07,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f932cc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:37:07,269 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ff7780b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:37:07,269 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:37:07,270 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2735da07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:37:07,270 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16ccf5f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/hadoop.log.dir/,STOPPED} 2024-11-11T04:37:07,271 WARN [BP-1693491921-172.17.0.2-1731299778654 heartbeating to localhost/127.0.0.1:36477 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:37:07,271 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T04:37:07,271 WARN [BP-1693491921-172.17.0.2-1731299778654 heartbeating to localhost/127.0.0.1:36477 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1693491921-172.17.0.2-1731299778654 (Datanode Uuid 805913f2-e027-42e2-b57f-203e3a22905f) service to localhost/127.0.0.1:36477 2024-11-11T04:37:07,271 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:37:07,272 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/cluster_052c37d9-502d-aa7a-5c87-77b2a5202b86/data/data3/current/BP-1693491921-172.17.0.2-1731299778654 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:37:07,272 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/cluster_052c37d9-502d-aa7a-5c87-77b2a5202b86/data/data4/current/BP-1693491921-172.17.0.2-1731299778654 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:37:07,272 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:37:07,274 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a67ff9c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:37:07,274 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3bd9a438{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:37:07,274 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:37:07,274 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f8f17a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:37:07,274 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@201bdbf9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/hadoop.log.dir/,STOPPED} 2024-11-11T04:37:07,276 WARN [BP-1693491921-172.17.0.2-1731299778654 heartbeating to localhost/127.0.0.1:36477 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T04:37:07,276 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T04:37:07,276 WARN [BP-1693491921-172.17.0.2-1731299778654 heartbeating to localhost/127.0.0.1:36477 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1693491921-172.17.0.2-1731299778654 (Datanode Uuid b545a042-33f7-456b-bd71-2763fff6b029) service to localhost/127.0.0.1:36477 2024-11-11T04:37:07,276 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T04:37:07,276 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/cluster_052c37d9-502d-aa7a-5c87-77b2a5202b86/data/data1/current/BP-1693491921-172.17.0.2-1731299778654 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:37:07,276 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/cluster_052c37d9-502d-aa7a-5c87-77b2a5202b86/data/data2/current/BP-1693491921-172.17.0.2-1731299778654 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T04:37:07,277 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T04:37:07,282 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2489695e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T04:37:07,283 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@25570184{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T04:37:07,283 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T04:37:07,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fda4535{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T04:37:07,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@52d230c9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/hadoop.log.dir/,STOPPED} 2024-11-11T04:37:07,290 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T04:37:07,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T04:37:07,322 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=231 (was 205) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36477 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36477 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36477 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36477 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:36477 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36477 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36477 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36477 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36477 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=515 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=24 (was 4) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=6546 (was 6585) 2024-11-11T04:37:07,330 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=231, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=24, ProcessCount=11, AvailableMemoryMB=6546 2024-11-11T04:37:07,330 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T04:37:07,330 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/hadoop.log.dir so I do NOT create it in target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9 2024-11-11T04:37:07,330 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2228693d-6e20-1217-9fca-ff7b84c6b80a/hadoop.tmp.dir so I do NOT create it in target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9 2024-11-11T04:37:07,330 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/cluster_9aa1663f-04a2-2473-29c0-16199c4a7eb2, deleteOnExit=true 2024-11-11T04:37:07,330 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T04:37:07,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/test.cache.data in system properties and HBase conf 2024-11-11T04:37:07,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T04:37:07,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/hadoop.log.dir in system properties and HBase conf 2024-11-11T04:37:07,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T04:37:07,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T04:37:07,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T04:37:07,331 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-11T04:37:07,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T04:37:07,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T04:37:07,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T04:37:07,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T04:37:07,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T04:37:07,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T04:37:07,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T04:37:07,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T04:37:07,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T04:37:07,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/nfs.dump.dir in system properties and HBase conf 2024-11-11T04:37:07,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/java.io.tmpdir in system properties and HBase conf 2024-11-11T04:37:07,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T04:37:07,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T04:37:07,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T04:37:07,344 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T04:37:07,400 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:37:07,403 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:37:07,404 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:37:07,404 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:37:07,404 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:37:07,405 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:37:07,405 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43f8fe5e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:37:07,405 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44711d59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:37:07,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:37:07,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:37:07,456 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T04:37:07,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T04:37:07,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-11T04:37:07,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-11T04:37:07,515 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@61290c18{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/java.io.tmpdir/jetty-localhost-45063-hadoop-hdfs-3_4_1-tests_jar-_-any-9004788017550893107/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T04:37:07,516 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3287f588{HTTP/1.1, (http/1.1)}{localhost:45063} 2024-11-11T04:37:07,516 INFO [Time-limited test {}] server.Server(415): Started @285318ms 2024-11-11T04:37:07,528 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T04:37:07,578 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:37:07,581 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:37:07,581 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:37:07,581 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:37:07,582 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:37:07,582 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b443d30{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:37:07,582 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1391b748{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:37:07,630 INFO [regionserver/a7bef91497aa:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:37:07,692 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2c0b3275{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/java.io.tmpdir/jetty-localhost-36045-hadoop-hdfs-3_4_1-tests_jar-_-any-8288722484892490731/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:37:07,693 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5bfccd5{HTTP/1.1, (http/1.1)}{localhost:36045} 2024-11-11T04:37:07,693 INFO [Time-limited test {}] server.Server(415): Started @285495ms 2024-11-11T04:37:07,694 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:37:07,720 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:37:07,722 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:37:07,723 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:37:07,723 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:37:07,723 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T04:37:07,723 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f33170f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:37:07,724 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e56b299{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:37:07,776 WARN [Thread-2461 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/cluster_9aa1663f-04a2-2473-29c0-16199c4a7eb2/data/data1/current/BP-2009007627-172.17.0.2-1731299827350/current, will proceed with Du for space computation calculation, 2024-11-11T04:37:07,776 WARN [Thread-2462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/cluster_9aa1663f-04a2-2473-29c0-16199c4a7eb2/data/data2/current/BP-2009007627-172.17.0.2-1731299827350/current, will proceed with Du for space computation calculation, 2024-11-11T04:37:07,796 WARN [Thread-2440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:37:07,799 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x29ee9703e3ce59d4 with lease ID 0x7ca42982fb45472d: Processing first storage report for DS-28b2489c-99cc-499b-8303-b788eb85fd68 from datanode DatanodeRegistration(127.0.0.1:42341, datanodeUuid=0f90b3cf-520e-4972-8f8a-5d20f334aa24, infoPort=42003, infoSecurePort=0, ipcPort=38055, storageInfo=lv=-57;cid=testClusterID;nsid=482380557;c=1731299827350) 2024-11-11T04:37:07,799 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x29ee9703e3ce59d4 with lease ID 0x7ca42982fb45472d: from storage DS-28b2489c-99cc-499b-8303-b788eb85fd68 node DatanodeRegistration(127.0.0.1:42341, datanodeUuid=0f90b3cf-520e-4972-8f8a-5d20f334aa24, infoPort=42003, infoSecurePort=0, ipcPort=38055, storageInfo=lv=-57;cid=testClusterID;nsid=482380557;c=1731299827350), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:37:07,799 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x29ee9703e3ce59d4 with lease ID 0x7ca42982fb45472d: Processing first storage report for DS-11baebe4-eecf-418d-982e-d8f5eab4d511 from datanode DatanodeRegistration(127.0.0.1:42341, datanodeUuid=0f90b3cf-520e-4972-8f8a-5d20f334aa24, infoPort=42003, infoSecurePort=0, ipcPort=38055, storageInfo=lv=-57;cid=testClusterID;nsid=482380557;c=1731299827350) 2024-11-11T04:37:07,799 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x29ee9703e3ce59d4 with lease ID 0x7ca42982fb45472d: from storage DS-11baebe4-eecf-418d-982e-d8f5eab4d511 node DatanodeRegistration(127.0.0.1:42341, datanodeUuid=0f90b3cf-520e-4972-8f8a-5d20f334aa24, infoPort=42003, infoSecurePort=0, ipcPort=38055, storageInfo=lv=-57;cid=testClusterID;nsid=482380557;c=1731299827350), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:37:07,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6148e76b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/java.io.tmpdir/jetty-localhost-40293-hadoop-hdfs-3_4_1-tests_jar-_-any-3961976121434459258/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:37:07,836 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@352a3917{HTTP/1.1, (http/1.1)}{localhost:40293} 2024-11-11T04:37:07,836 INFO [Time-limited test {}] server.Server(415): Started @285638ms 2024-11-11T04:37:07,837 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
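The entries above and below come from HBaseTestingUtil bringing up the per-test mini HDFS/ZooKeeper/HBase stack under a throwaway test-data directory, which is what all the "Setting ... in system properties and HBase conf" messages record. As a rough, hypothetical sketch of the harness these messages originate from (the class name MiniClusterSketch is invented here; startMiniCluster/shutdownMiniCluster/getConfiguration are the usual HBaseTestingUtil entry points, assumed rather than taken from this run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // HBaseTestingUtil picks a fresh test-data directory and rewrites paths such as
    // hbase.rootdir, java.io.tmpdir and the dfs.* directories, as logged above.
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();           // mini DFS + mini ZooKeeper + master + region server
    try {
      Configuration conf = util.getConfiguration();
      System.out.println("hbase.rootdir = " + conf.get("hbase.rootdir"));
    } finally {
      util.shutdownMiniCluster();      // tears the whole stack back down
    }
  }
}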
2024-11-11T04:37:07,926 WARN [Thread-2488 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/cluster_9aa1663f-04a2-2473-29c0-16199c4a7eb2/data/data4/current/BP-2009007627-172.17.0.2-1731299827350/current, will proceed with Du for space computation calculation, 2024-11-11T04:37:07,926 WARN [Thread-2487 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/cluster_9aa1663f-04a2-2473-29c0-16199c4a7eb2/data/data3/current/BP-2009007627-172.17.0.2-1731299827350/current, will proceed with Du for space computation calculation, 2024-11-11T04:37:07,948 WARN [Thread-2476 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T04:37:07,951 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7892cac87b23454c with lease ID 0x7ca42982fb45472e: Processing first storage report for DS-a3e71416-5882-4d3d-b3c1-b795817ffff9 from datanode DatanodeRegistration(127.0.0.1:39623, datanodeUuid=a65446cb-4a0e-4a18-bf88-9efaa4f46e6d, infoPort=35151, infoSecurePort=0, ipcPort=41109, storageInfo=lv=-57;cid=testClusterID;nsid=482380557;c=1731299827350) 2024-11-11T04:37:07,951 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7892cac87b23454c with lease ID 0x7ca42982fb45472e: from storage DS-a3e71416-5882-4d3d-b3c1-b795817ffff9 node DatanodeRegistration(127.0.0.1:39623, datanodeUuid=a65446cb-4a0e-4a18-bf88-9efaa4f46e6d, infoPort=35151, infoSecurePort=0, ipcPort=41109, storageInfo=lv=-57;cid=testClusterID;nsid=482380557;c=1731299827350), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:37:07,951 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7892cac87b23454c with lease ID 0x7ca42982fb45472e: Processing first storage report for DS-59f14427-a5a7-447e-936b-f265e82e63f1 from datanode DatanodeRegistration(127.0.0.1:39623, datanodeUuid=a65446cb-4a0e-4a18-bf88-9efaa4f46e6d, infoPort=35151, infoSecurePort=0, ipcPort=41109, storageInfo=lv=-57;cid=testClusterID;nsid=482380557;c=1731299827350) 2024-11-11T04:37:07,951 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7892cac87b23454c with lease ID 0x7ca42982fb45472e: from storage DS-59f14427-a5a7-447e-936b-f265e82e63f1 node DatanodeRegistration(127.0.0.1:39623, datanodeUuid=a65446cb-4a0e-4a18-bf88-9efaa4f46e6d, infoPort=35151, infoSecurePort=0, ipcPort=41109, storageInfo=lv=-57;cid=testClusterID;nsid=482380557;c=1731299827350), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:37:07,956 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9 2024-11-11T04:37:07,959 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/cluster_9aa1663f-04a2-2473-29c0-16199c4a7eb2/zookeeper_0, clientPort=62865, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/cluster_9aa1663f-04a2-2473-29c0-16199c4a7eb2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/cluster_9aa1663f-04a2-2473-29c0-16199c4a7eb2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T04:37:07,960 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62865 2024-11-11T04:37:07,960 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:37:07,961 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:37:07,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741825_1001 (size=7) 2024-11-11T04:37:07,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741825_1001 (size=7) 2024-11-11T04:37:07,970 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44 with version=8 2024-11-11T04:37:07,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33227/user/jenkins/test-data/43dca76b-e8c9-757a-6fe0-e0e9131470e5/hbase-staging 2024-11-11T04:37:07,972 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a7bef91497aa:0 server-side Connection retries=45 2024-11-11T04:37:07,972 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:37:07,972 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:37:07,972 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:37:07,972 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:37:07,972 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:37:07,972 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T04:37:07,972 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:37:07,973 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34583 2024-11-11T04:37:07,974 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34583 connecting to ZooKeeper ensemble=127.0.0.1:62865 2024-11-11T04:37:07,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:345830x0, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:37:07,980 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34583-0x101959e565f0000 connected 2024-11-11T04:37:07,996 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:37:07,998 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:37:07,999 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:37:07,999 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44, hbase.cluster.distributed=false 2024-11-11T04:37:08,001 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:37:08,001 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34583 2024-11-11T04:37:08,002 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34583 2024-11-11T04:37:08,002 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34583 2024-11-11T04:37:08,002 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34583 2024-11-11T04:37:08,002 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34583 2024-11-11T04:37:08,017 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a7bef91497aa:0 server-side Connection retries=45 2024-11-11T04:37:08,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:37:08,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:37:08,017 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:37:08,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:37:08,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:37:08,017 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T04:37:08,017 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:37:08,018 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42693 2024-11-11T04:37:08,018 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42693 connecting to ZooKeeper ensemble=127.0.0.1:62865 2024-11-11T04:37:08,019 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:37:08,020 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:37:08,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:426930x0, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:37:08,024 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:426930x0, quorum=127.0.0.1:62865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:37:08,024 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42693-0x101959e565f0001 connected 2024-11-11T04:37:08,024 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T04:37:08,025 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T04:37:08,025 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T04:37:08,026 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:37:08,026 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42693 2024-11-11T04:37:08,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42693 2024-11-11T04:37:08,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42693 2024-11-11T04:37:08,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42693 2024-11-11T04:37:08,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42693 2024-11-11T04:37:08,038 
DEBUG [M:0;a7bef91497aa:34583 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a7bef91497aa:34583 2024-11-11T04:37:08,038 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a7bef91497aa,34583,1731299827971 2024-11-11T04:37:08,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:37:08,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:37:08,041 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a7bef91497aa,34583,1731299827971 2024-11-11T04:37:08,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T04:37:08,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:08,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:08,043 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T04:37:08,043 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a7bef91497aa,34583,1731299827971 from backup master directory 2024-11-11T04:37:08,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:37:08,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a7bef91497aa,34583,1731299827971 2024-11-11T04:37:08,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:37:08,044 WARN [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
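The ZKUtil/ZKWatcher chatter above ("Set watcher on znode that does not yet exist, /hbase/running", followed by NodeCreated/NodeChildrenChanged events) is the standard ZooKeeper watch pattern: call exists() with a watcher so the caller is notified once the znode later appears. A minimal sketch with the plain ZooKeeper client API (the connect string, timeout and latch below are illustrative, not the values negotiated in this run):

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);
    Watcher watcher = (WatchedEvent event) -> {
      // Fires on events for the watched path; we only care about creation here.
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        created.countDown();
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher);
    // exists() registers the watch even though the znode may not exist yet,
    // mirroring the "Set watcher on znode that does not yet exist" lines above.
    zk.exists("/hbase/running", watcher);
    created.await();   // unblocks when the master later creates /hbase/running
    zk.close();
  }
}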
2024-11-11T04:37:08,044 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a7bef91497aa,34583,1731299827971 2024-11-11T04:37:08,048 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/hbase.id] with ID: f9241465-994c-4cb3-aebe-5607a1836729 2024-11-11T04:37:08,048 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/.tmp/hbase.id 2024-11-11T04:37:08,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741826_1002 (size=42) 2024-11-11T04:37:08,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741826_1002 (size=42) 2024-11-11T04:37:08,055 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/.tmp/hbase.id]:[hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/hbase.id] 2024-11-11T04:37:08,065 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:37:08,065 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T04:37:08,066 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
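The FSUtils messages above show the usual write-then-rename idiom for the cluster ID file: the content is first written under .tmp and only then moved to its final name, so readers never observe a partially written hbase.id. A rough sketch of that idiom with the plain Hadoop FileSystem API (the paths are placeholders; the ID string is the one reported above):

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenRenameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();   // picks up fs.defaultFS from the environment
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/hbase/.tmp/hbase.id");
    Path dst = new Path("/hbase/hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("f9241465-994c-4cb3-aebe-5607a1836729".getBytes(StandardCharsets.UTF_8));
    }
    // The rename is what makes the file appear at its final location in one step.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}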
2024-11-11T04:37:08,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:08,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:08,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741827_1003 (size=196) 2024-11-11T04:37:08,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741827_1003 (size=196) 2024-11-11T04:37:08,076 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T04:37:08,077 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T04:37:08,077 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:37:08,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741828_1004 (size=1189) 2024-11-11T04:37:08,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741828_1004 (size=1189) 2024-11-11T04:37:08,084 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store 2024-11-11T04:37:08,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:37:08,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:37:08,089 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:37:08,089 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T04:37:08,089 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:37:08,089 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:37:08,089 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T04:37:08,089 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:37:08,089 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
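The descriptor dumped above (families info, proc, rs and state, each with its own VERSIONS, DATA_BLOCK_ENCODING, BLOOMFILTER and BLOCKSIZE settings) is the kind of definition the public client API expresses with TableDescriptorBuilder and ColumnFamilyDescriptorBuilder. A hedged sketch of how the 'info' family above could be declared that way; the table name "demo:store" is invented, only the family attributes are taken from the log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family attributes printed in the log:
    // VERSIONS=3, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOOMFILTER=ROWCOL,
    // IN_MEMORY=true, BLOCKSIZE=8192.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8192)
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo", "store"))   // hypothetical table name
        .setColumnFamily(info)
        .build();
  }
}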
2024-11-11T04:37:08,089 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731299828089Disabling compacts and flushes for region at 1731299828089Disabling writes for close at 1731299828089Writing region close event to WAL at 1731299828089Closed at 1731299828089 2024-11-11T04:37:08,090 WARN [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/.initializing 2024-11-11T04:37:08,090 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/WALs/a7bef91497aa,34583,1731299827971 2024-11-11T04:37:08,092 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C34583%2C1731299827971, suffix=, logDir=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/WALs/a7bef91497aa,34583,1731299827971, archiveDir=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/oldWALs, maxLogs=10 2024-11-11T04:37:08,092 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C34583%2C1731299827971.1731299828092 2024-11-11T04:37:08,096 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/WALs/a7bef91497aa,34583,1731299827971/a7bef91497aa%2C34583%2C1731299827971.1731299828092 2024-11-11T04:37:08,098 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35151:35151),(127.0.0.1/127.0.0.1:42003:42003)] 2024-11-11T04:37:08,100 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:37:08,100 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:37:08,100 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:37:08,100 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:37:08,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:37:08,102 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T04:37:08,102 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:37:08,103 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:37:08,103 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:37:08,104 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T04:37:08,104 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:37:08,104 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:37:08,104 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:37:08,105 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T04:37:08,105 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:37:08,105 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:37:08,105 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:37:08,106 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T04:37:08,106 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:37:08,107 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:37:08,107 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:37:08,108 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:37:08,108 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:37:08,109 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:37:08,109 DEBUG [master/a7bef91497aa:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:37:08,109 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T04:37:08,110 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:37:08,111 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:37:08,112 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=881066, jitterRate=0.12033425271511078}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T04:37:08,112 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731299828100Initializing all the Stores at 1731299828101 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299828101Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299828101Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299828101Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299828101Cleaning up temporary data from old regions at 1731299828109 (+8 ms)Region opened successfully at 1731299828112 (+3 ms) 2024-11-11T04:37:08,112 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T04:37:08,115 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8f739e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7bef91497aa/172.17.0.2:0 2024-11-11T04:37:08,116 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T04:37:08,116 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T04:37:08,116 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T04:37:08,116 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T04:37:08,116 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T04:37:08,117 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T04:37:08,117 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T04:37:08,118 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T04:37:08,119 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T04:37:08,121 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T04:37:08,121 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T04:37:08,121 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T04:37:08,123 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T04:37:08,123 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T04:37:08,124 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T04:37:08,125 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T04:37:08,126 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T04:37:08,127 DEBUG 
[master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T04:37:08,128 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T04:37:08,129 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T04:37:08,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:37:08,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:37:08,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:08,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:08,132 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a7bef91497aa,34583,1731299827971, sessionid=0x101959e565f0000, setting cluster-up flag (Was=false) 2024-11-11T04:37:08,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:08,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:08,140 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T04:37:08,141 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7bef91497aa,34583,1731299827971 2024-11-11T04:37:08,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:08,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:08,150 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T04:37:08,151 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7bef91497aa,34583,1731299827971 2024-11-11T04:37:08,151 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T04:37:08,153 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T04:37:08,153 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T04:37:08,153 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T04:37:08,153 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a7bef91497aa,34583,1731299827971 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T04:37:08,154 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:37:08,154 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:37:08,154 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:37:08,154 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a7bef91497aa:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:37:08,154 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a7bef91497aa:0, corePoolSize=10, maxPoolSize=10 2024-11-11T04:37:08,154 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:37:08,154 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a7bef91497aa:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:37:08,154 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a7bef91497aa:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T04:37:08,155 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731299858155 2024-11-11T04:37:08,155 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T04:37:08,155 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T04:37:08,155 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T04:37:08,155 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T04:37:08,155 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T04:37:08,155 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T04:37:08,155 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,156 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:37:08,156 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T04:37:08,156 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T04:37:08,156 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T04:37:08,156 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T04:37:08,156 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T04:37:08,156 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T04:37:08,156 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299828156,5,FailOnTimeoutGroup] 2024-11-11T04:37:08,156 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299828156,5,FailOnTimeoutGroup] 2024-11-11T04:37:08,156 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,156 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T04:37:08,157 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,157 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:37:08,157 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,157 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T04:37:08,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741831_1007 (size=1321) 2024-11-11T04:37:08,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741831_1007 (size=1321) 2024-11-11T04:37:08,166 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T04:37:08,166 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44 2024-11-11T04:37:08,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:37:08,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:37:08,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:37:08,173 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:37:08,174 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:37:08,174 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:37:08,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:37:08,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T04:37:08,176 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T04:37:08,176 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:37:08,176 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:37:08,176 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:37:08,177 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:37:08,177 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:37:08,177 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:37:08,177 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T04:37:08,178 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:37:08,178 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:37:08,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:37:08,178 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T04:37:08,179 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/data/hbase/meta/1588230740 2024-11-11T04:37:08,179 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/data/hbase/meta/1588230740 2024-11-11T04:37:08,180 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T04:37:08,180 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T04:37:08,180 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-11T04:37:08,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T04:37:08,183 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:37:08,183 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=877957, jitterRate=0.11638069152832031}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T04:37:08,184 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731299828172Initializing all the Stores at 1731299828173 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299828173Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299828173Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299828173Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299828173Cleaning up temporary data from old regions at 1731299828180 (+7 ms)Region opened successfully at 1731299828183 (+3 ms) 2024-11-11T04:37:08,184 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:37:08,184 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T04:37:08,184 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T04:37:08,184 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:37:08,184 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:37:08,184 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T04:37:08,184 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731299828184Disabling compacts and flushes for region at 1731299828184Disabling writes for close at 1731299828184Writing region close 
event to WAL at 1731299828184Closed at 1731299828184 2024-11-11T04:37:08,185 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:37:08,185 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T04:37:08,185 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T04:37:08,186 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T04:37:08,187 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T04:37:08,229 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.HRegionServer(746): ClusterId : f9241465-994c-4cb3-aebe-5607a1836729 2024-11-11T04:37:08,229 DEBUG [RS:0;a7bef91497aa:42693 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T04:37:08,231 DEBUG [RS:0;a7bef91497aa:42693 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T04:37:08,231 DEBUG [RS:0;a7bef91497aa:42693 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T04:37:08,233 DEBUG [RS:0;a7bef91497aa:42693 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T04:37:08,234 DEBUG [RS:0;a7bef91497aa:42693 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@675f66cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7bef91497aa/172.17.0.2:0 2024-11-11T04:37:08,245 DEBUG [RS:0;a7bef91497aa:42693 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a7bef91497aa:42693 2024-11-11T04:37:08,246 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T04:37:08,246 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T04:37:08,246 DEBUG [RS:0;a7bef91497aa:42693 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-11T04:37:08,246 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.HRegionServer(2659): reportForDuty to master=a7bef91497aa,34583,1731299827971 with port=42693, startcode=1731299828016 2024-11-11T04:37:08,246 DEBUG [RS:0;a7bef91497aa:42693 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T04:37:08,248 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57135, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T04:37:08,249 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34583 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a7bef91497aa,42693,1731299828016 2024-11-11T04:37:08,249 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34583 {}] master.ServerManager(517): Registering regionserver=a7bef91497aa,42693,1731299828016 2024-11-11T04:37:08,250 DEBUG [RS:0;a7bef91497aa:42693 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44 2024-11-11T04:37:08,250 DEBUG [RS:0;a7bef91497aa:42693 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42503 2024-11-11T04:37:08,250 DEBUG [RS:0;a7bef91497aa:42693 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T04:37:08,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:37:08,253 DEBUG [RS:0;a7bef91497aa:42693 {}] zookeeper.ZKUtil(111): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a7bef91497aa,42693,1731299828016 2024-11-11T04:37:08,253 WARN [RS:0;a7bef91497aa:42693 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T04:37:08,253 INFO [RS:0;a7bef91497aa:42693 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:37:08,253 DEBUG [RS:0;a7bef91497aa:42693 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/WALs/a7bef91497aa,42693,1731299828016 2024-11-11T04:37:08,253 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a7bef91497aa,42693,1731299828016] 2024-11-11T04:37:08,256 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T04:37:08,257 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T04:37:08,257 INFO [RS:0;a7bef91497aa:42693 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T04:37:08,257 INFO [RS:0;a7bef91497aa:42693 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-11T04:37:08,258 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T04:37:08,258 INFO [RS:0;a7bef91497aa:42693 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T04:37:08,258 INFO [RS:0;a7bef91497aa:42693 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,258 DEBUG [RS:0;a7bef91497aa:42693 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:37:08,258 DEBUG [RS:0;a7bef91497aa:42693 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:37:08,258 DEBUG [RS:0;a7bef91497aa:42693 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:37:08,258 DEBUG [RS:0;a7bef91497aa:42693 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:37:08,259 DEBUG [RS:0;a7bef91497aa:42693 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:37:08,259 DEBUG [RS:0;a7bef91497aa:42693 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a7bef91497aa:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:37:08,259 DEBUG [RS:0;a7bef91497aa:42693 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:37:08,259 DEBUG [RS:0;a7bef91497aa:42693 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:37:08,259 DEBUG [RS:0;a7bef91497aa:42693 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:37:08,259 DEBUG [RS:0;a7bef91497aa:42693 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:37:08,259 DEBUG [RS:0;a7bef91497aa:42693 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:37:08,259 DEBUG [RS:0;a7bef91497aa:42693 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a7bef91497aa:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:37:08,259 DEBUG [RS:0;a7bef91497aa:42693 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a7bef91497aa:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:37:08,259 DEBUG [RS:0;a7bef91497aa:42693 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a7bef91497aa:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:37:08,260 INFO [RS:0;a7bef91497aa:42693 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-11T04:37:08,260 INFO [RS:0;a7bef91497aa:42693 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,260 INFO [RS:0;a7bef91497aa:42693 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,260 INFO [RS:0;a7bef91497aa:42693 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,260 INFO [RS:0;a7bef91497aa:42693 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,260 INFO [RS:0;a7bef91497aa:42693 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,42693,1731299828016-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:37:08,274 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T04:37:08,274 INFO [RS:0;a7bef91497aa:42693 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,42693,1731299828016-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,274 INFO [RS:0;a7bef91497aa:42693 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,274 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.Replication(171): a7bef91497aa,42693,1731299828016 started 2024-11-11T04:37:08,287 INFO [RS:0;a7bef91497aa:42693 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,287 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.HRegionServer(1482): Serving as a7bef91497aa,42693,1731299828016, RpcServer on a7bef91497aa/172.17.0.2:42693, sessionid=0x101959e565f0001 2024-11-11T04:37:08,287 DEBUG [RS:0;a7bef91497aa:42693 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T04:37:08,287 DEBUG [RS:0;a7bef91497aa:42693 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a7bef91497aa,42693,1731299828016 2024-11-11T04:37:08,287 DEBUG [RS:0;a7bef91497aa:42693 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7bef91497aa,42693,1731299828016' 2024-11-11T04:37:08,287 DEBUG [RS:0;a7bef91497aa:42693 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T04:37:08,288 DEBUG [RS:0;a7bef91497aa:42693 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T04:37:08,288 DEBUG [RS:0;a7bef91497aa:42693 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T04:37:08,288 DEBUG [RS:0;a7bef91497aa:42693 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T04:37:08,288 DEBUG [RS:0;a7bef91497aa:42693 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a7bef91497aa,42693,1731299828016 2024-11-11T04:37:08,288 DEBUG [RS:0;a7bef91497aa:42693 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7bef91497aa,42693,1731299828016' 2024-11-11T04:37:08,288 DEBUG [RS:0;a7bef91497aa:42693 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T04:37:08,289 DEBUG 
[RS:0;a7bef91497aa:42693 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T04:37:08,289 DEBUG [RS:0;a7bef91497aa:42693 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T04:37:08,289 INFO [RS:0;a7bef91497aa:42693 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T04:37:08,289 INFO [RS:0;a7bef91497aa:42693 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T04:37:08,337 WARN [a7bef91497aa:34583 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-11T04:37:08,391 INFO [RS:0;a7bef91497aa:42693 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C42693%2C1731299828016, suffix=, logDir=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/WALs/a7bef91497aa,42693,1731299828016, archiveDir=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/oldWALs, maxLogs=32 2024-11-11T04:37:08,391 INFO [RS:0;a7bef91497aa:42693 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C42693%2C1731299828016.1731299828391 2024-11-11T04:37:08,397 INFO [RS:0;a7bef91497aa:42693 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/WALs/a7bef91497aa,42693,1731299828016/a7bef91497aa%2C42693%2C1731299828016.1731299828391 2024-11-11T04:37:08,401 DEBUG [RS:0;a7bef91497aa:42693 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42003:42003),(127.0.0.1/127.0.0.1:35151:35151)] 2024-11-11T04:37:08,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,38817,1731299646525/a7bef91497aa%2C38817%2C1731299646525.meta.1731299647327.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T04:37:08,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41327/user/jenkins/test-data/32643456-ae57-e4de-948d-780bff8d5094/WALs/a7bef91497aa,40761,1731299647475/a7bef91497aa%2C40761%2C1731299647475.1731299647660 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T04:37:08,587 DEBUG [a7bef91497aa:34583 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T04:37:08,588 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a7bef91497aa,42693,1731299828016 2024-11-11T04:37:08,589 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7bef91497aa,42693,1731299828016, state=OPENING 2024-11-11T04:37:08,591 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T04:37:08,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:08,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:08,593 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T04:37:08,593 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:37:08,593 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:37:08,593 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a7bef91497aa,42693,1731299828016}] 2024-11-11T04:37:08,746 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T04:37:08,747 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59277, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T04:37:08,751 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T04:37:08,751 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:37:08,752 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7bef91497aa%2C42693%2C1731299828016.meta, suffix=.meta, logDir=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/WALs/a7bef91497aa,42693,1731299828016, archiveDir=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/oldWALs, maxLogs=32 2024-11-11T04:37:08,752 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a7bef91497aa%2C42693%2C1731299828016.meta.1731299828752.meta 2024-11-11T04:37:08,761 INFO 
[RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/WALs/a7bef91497aa,42693,1731299828016/a7bef91497aa%2C42693%2C1731299828016.meta.1731299828752.meta 2024-11-11T04:37:08,764 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42003:42003),(127.0.0.1/127.0.0.1:35151:35151)] 2024-11-11T04:37:08,768 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:37:08,768 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T04:37:08,768 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T04:37:08,768 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-11T04:37:08,768 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T04:37:08,768 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:37:08,769 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T04:37:08,769 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T04:37:08,770 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:37:08,770 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:37:08,770 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:37:08,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:37:08,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T04:37:08,771 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T04:37:08,771 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:37:08,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:37:08,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:37:08,772 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:37:08,772 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:37:08,773 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:37:08,773 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T04:37:08,773 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:37:08,773 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:37:08,773 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:37:08,773 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T04:37:08,774 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/data/hbase/meta/1588230740 2024-11-11T04:37:08,775 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/data/hbase/meta/1588230740 2024-11-11T04:37:08,776 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T04:37:08,776 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T04:37:08,776 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
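
[editor's note] The FlushLargeStoresPolicy entry directly above reports that hbase:meta has no per-column-family flush lower bound configured, so the region falls back to memstore-flush-size divided by the number of families (16.0 M here). As a hedged illustration only — the property name is taken from that log message, while the 16 MB value, the bare Configuration, and the class name are assumptions, not something this test does — the bound could be set explicitly like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Property name comes from the FlushLargeStoresPolicy message above;
            // 16 MB matches the fallback value the region computed for itself.
            conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                16L * 1024 * 1024);
            // The same key can also be carried in the table descriptor, which is
            // where FlushLargeStoresPolicy looks first (hence "not set in table
            // hbase:meta descriptor" above).
            System.out.println(conf.get("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
        }
    }
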
2024-11-11T04:37:08,777 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T04:37:08,778 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=696802, jitterRate=-0.11397136747837067}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T04:37:08,778 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T04:37:08,778 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731299828769Writing region info on filesystem at 1731299828769Initializing all the Stores at 1731299828769Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299828769Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299828770 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731299828770Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731299828770Cleaning up temporary data from old regions at 1731299828776 (+6 ms)Running coprocessor post-open hooks at 1731299828778 (+2 ms)Region opened successfully at 1731299828778 2024-11-11T04:37:08,779 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731299828745 2024-11-11T04:37:08,781 DEBUG [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T04:37:08,781 INFO [RS_OPEN_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T04:37:08,782 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=a7bef91497aa,42693,1731299828016 2024-11-11T04:37:08,783 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7bef91497aa,42693,1731299828016, state=OPEN 2024-11-11T04:37:08,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:37:08,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:37:08,788 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a7bef91497aa,42693,1731299828016 2024-11-11T04:37:08,788 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:37:08,788 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:37:08,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T04:37:08,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a7bef91497aa,42693,1731299828016 in 195 msec 2024-11-11T04:37:08,792 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T04:37:08,792 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 605 msec 2024-11-11T04:37:08,792 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:37:08,792 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T04:37:08,794 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T04:37:08,794 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7bef91497aa,42693,1731299828016, seqNum=-1] 2024-11-11T04:37:08,794 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:37:08,795 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53767, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:37:08,799 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 646 msec 2024-11-11T04:37:08,799 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731299828799, completionTime=-1 2024-11-11T04:37:08,799 INFO 
[master/a7bef91497aa:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T04:37:08,799 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T04:37:08,801 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T04:37:08,801 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731299888801 2024-11-11T04:37:08,801 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731299948801 2024-11-11T04:37:08,801 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-11T04:37:08,801 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,34583,1731299827971-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,801 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,34583,1731299827971-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,801 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,34583,1731299827971-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,801 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a7bef91497aa:34583, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,802 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,802 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,803 DEBUG [master/a7bef91497aa:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T04:37:08,805 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.761sec 2024-11-11T04:37:08,805 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T04:37:08,805 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T04:37:08,805 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T04:37:08,805 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
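
[editor's note] The "Chore ScheduledChore name=... is enabled" entries above are the master registering its periodic tasks (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, and so on) with its ChoreService. ChoreService and ScheduledChore are internal HBase classes, so this is only a rough sketch of that pattern under the long-standing constructors as I understand them; the names "sketch-pool" and "ExampleChore", the 1-second period, and the anonymous Stoppable are made up for illustration.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        public static void main(String[] args) throws InterruptedException {
            // Minimal Stoppable so the chore can be cancelled cooperatively.
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };
            ChoreService service = new ChoreService("sketch-pool");
            // The period is in milliseconds by default, matching the
            // "unit=MILLISECONDS" shown for most chores in the log above.
            ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
                @Override protected void chore() {
                    System.out.println("chore tick");
                }
            };
            service.scheduleChore(chore);
            Thread.sleep(3_000);
            stopper.stop("done");
            service.shutdown();
        }
    }
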
2024-11-11T04:37:08,805 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T04:37:08,805 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,34583,1731299827971-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:37:08,805 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,34583,1731299827971-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T04:37:08,807 DEBUG [master/a7bef91497aa:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T04:37:08,807 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T04:37:08,807 INFO [master/a7bef91497aa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7bef91497aa,34583,1731299827971-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:37:08,829 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bfe0bbd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:37:08,829 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a7bef91497aa,34583,-1 for getting cluster id 2024-11-11T04:37:08,829 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T04:37:08,830 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f9241465-994c-4cb3-aebe-5607a1836729' 2024-11-11T04:37:08,831 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T04:37:08,831 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f9241465-994c-4cb3-aebe-5607a1836729" 2024-11-11T04:37:08,831 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65934ea5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:37:08,831 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a7bef91497aa,34583,-1] 2024-11-11T04:37:08,831 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T04:37:08,832 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:37:08,833 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36962, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T04:37:08,833 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@350bdc50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:37:08,834 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T04:37:08,834 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7bef91497aa,42693,1731299828016, seqNum=-1] 2024-11-11T04:37:08,835 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:37:08,835 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56710, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:37:08,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a7bef91497aa,34583,1731299827971 2024-11-11T04:37:08,837 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:37:08,839 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T04:37:08,840 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:37:08,842 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/WALs/test.com,8080,1, archiveDir=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/oldWALs, maxLogs=32 2024-11-11T04:37:08,842 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731299828842 2024-11-11T04:37:08,847 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/WALs/test.com,8080,1/test.com%2C8080%2C1.1731299828842 2024-11-11T04:37:08,847 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42003:42003),(127.0.0.1/127.0.0.1:35151:35151)] 2024-11-11T04:37:08,851 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731299828851 2024-11-11T04:37:08,857 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:08,857 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:08,857 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:08,857 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:08,857 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:08,857 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/WALs/test.com,8080,1/test.com%2C8080%2C1.1731299828842 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/WALs/test.com,8080,1/test.com%2C8080%2C1.1731299828851 2024-11-11T04:37:08,860 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42003:42003),(127.0.0.1/127.0.0.1:35151:35151)] 2024-11-11T04:37:08,860 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/WALs/test.com,8080,1/test.com%2C8080%2C1.1731299828842 is not closed yet, will try archiving it next time 2024-11-11T04:37:08,861 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:08,861 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:08,861 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:08,861 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:08,861 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:08,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741835_1011 (size=93) 2024-11-11T04:37:08,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741835_1011 (size=93) 2024-11-11T04:37:08,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741836_1012 (size=93) 2024-11-11T04:37:08,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741836_1012 (size=93) 2024-11-11T04:37:08,864 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/WALs/test.com,8080,1/test.com%2C8080%2C1.1731299828842 to hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/oldWALs/test.com%2C8080%2C1.1731299828842 2024-11-11T04:37:08,866 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/oldWALs 2024-11-11T04:37:08,866 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731299828851) 2024-11-11T04:37:08,866 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T04:37:08,866 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
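
[editor's note] The entries above are the heart of the rolling test: a WALFactory backed by FSHLogProvider creates test.com%2C8080%2C1.1731299828842, a roll switches writers to ...828851, and the now-empty old file is archived to oldWALs before the WAL is closed. Below is a minimal sketch of that create-then-roll sequence, assuming the 2.x-era public WALFactory/WAL signatures; the factory id, the "test" table name, and running against a default local Configuration are illustrative assumptions, not a copy of what this test actually does.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.wal.WAL;
    import org.apache.hadoop.hbase.wal.WALFactory;

    public class WalRollSketch {
        static void createAndRoll(Configuration conf) throws Exception {
            // The factory id becomes the WAL file prefix, like "test.com%2C8080%2C1" above.
            WALFactory walFactory = new WALFactory(conf, "test.com,8080,1");
            try {
                RegionInfo region =
                    RegionInfoBuilder.newBuilder(TableName.valueOf("test")).build();
                WAL wal = walFactory.getWAL(region);
                // Rolling opens a new writer/file; the previous file, having no
                // unflushed entries, becomes eligible for archiving to oldWALs.
                wal.rollWriter();
            } finally {
                walFactory.close();
            }
        }

        public static void main(String[] args) throws Exception {
            createAndRoll(HBaseConfiguration.create());
        }
    }
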
2024-11-11T04:37:08,866 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:37:08,866 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:37:08,866 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:37:08,866 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
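
[editor's note] The call stack above is the normal teardown path: AbstractTestLogRolling.tearDown() calls HBaseTestingUtil.shutdownMiniCluster(), which first closes the shared async connection (hence the "Connection has been closed" message) and then stops the mini HBase cluster and the mini DFS cluster. A hedged sketch of that hook follows; the HBaseTestingUtil class and the shutdownMiniCluster() call come from the stack trace, while the sketch class name, the TEST_UTIL field, the assumed no-arg constructor, and the @After wiring are illustrative.

    import org.junit.After;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public abstract class LogRollingTeardownSketch {
        // Assumed field; the real test keeps its own HBaseTestingUtil instance,
        // started earlier in the run (the "Minicluster is up" entry above).
        protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

        @After
        public void tearDown() throws Exception {
            // Mirrors the trace above: close the cluster connection, shut down the
            // mini HBase cluster, then the mini DFS cluster.
            TEST_UTIL.shutdownMiniCluster();
        }
    }
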
2024-11-11T04:37:08,866 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T04:37:08,866 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=138090316, stopped=false 2024-11-11T04:37:08,867 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a7bef91497aa,34583,1731299827971 2024-11-11T04:37:08,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:37:08,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:37:08,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:08,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:08,868 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T04:37:08,868 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T04:37:08,868 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:37:08,869 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:37:08,869 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a7bef91497aa,42693,1731299828016' ***** 2024-11-11T04:37:08,869 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T04:37:08,869 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:37:08,869 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:37:08,869 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T04:37:08,869 INFO [RS:0;a7bef91497aa:42693 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T04:37:08,869 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T04:37:08,870 INFO [RS:0;a7bef91497aa:42693 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T04:37:08,870 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.HRegionServer(959): stopping server a7bef91497aa,42693,1731299828016 2024-11-11T04:37:08,870 INFO [RS:0;a7bef91497aa:42693 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T04:37:08,870 INFO [RS:0;a7bef91497aa:42693 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a7bef91497aa:42693. 
2024-11-11T04:37:08,870 DEBUG [RS:0;a7bef91497aa:42693 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T04:37:08,870 DEBUG [RS:0;a7bef91497aa:42693 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:37:08,870 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T04:37:08,870 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T04:37:08,870 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-11T04:37:08,870 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T04:37:08,870 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-11T04:37:08,870 DEBUG [RS:0;a7bef91497aa:42693 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-11T04:37:08,870 DEBUG [RS:0;a7bef91497aa:42693 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T04:37:08,871 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:37:08,871 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T04:37:08,871 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T04:37:08,871 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:37:08,871 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:37:08,871 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-11T04:37:08,886 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/data/hbase/meta/1588230740/.tmp/ns/69a3adc837bd4d1992d56e56cd7a2859 is 43, key is default/ns:d/1731299828795/Put/seqid=0 2024-11-11T04:37:08,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741837_1013 (size=5153) 2024-11-11T04:37:08,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741837_1013 (size=5153) 2024-11-11T04:37:08,891 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/data/hbase/meta/1588230740/.tmp/ns/69a3adc837bd4d1992d56e56cd7a2859 2024-11-11T04:37:08,897 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/data/hbase/meta/1588230740/.tmp/ns/69a3adc837bd4d1992d56e56cd7a2859 as hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/data/hbase/meta/1588230740/ns/69a3adc837bd4d1992d56e56cd7a2859 2024-11-11T04:37:08,902 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/data/hbase/meta/1588230740/ns/69a3adc837bd4d1992d56e56cd7a2859, entries=2, sequenceid=6, filesize=5.0 K 2024-11-11T04:37:08,903 INFO 
[RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false 2024-11-11T04:37:08,903 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T04:37:08,906 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-11T04:37:08,907 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T04:37:08,907 INFO [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T04:37:08,907 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731299828870Running coprocessor pre-close hooks at 1731299828870Disabling compacts and flushes for region at 1731299828871 (+1 ms)Disabling writes for close at 1731299828871Obtaining lock to block concurrent updates at 1731299828871Preparing flush snapshotting stores in 1588230740 at 1731299828871Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731299828871Flushing stores of hbase:meta,,1.1588230740 at 1731299828872 (+1 ms)Flushing 1588230740/ns: creating writer at 1731299828872Flushing 1588230740/ns: appending metadata at 1731299828886 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731299828886Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@457f236c: reopening flushed file at 1731299828896 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false at 1731299828903 (+7 ms)Writing region close event to WAL at 1731299828904 (+1 ms)Running coprocessor post-close hooks at 1731299828907 (+3 ms)Closed at 1731299828907 2024-11-11T04:37:08,907 DEBUG [RS_CLOSE_META-regionserver/a7bef91497aa:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T04:37:09,071 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.HRegionServer(976): stopping server a7bef91497aa,42693,1731299828016; all regions closed. 
2024-11-11T04:37:09,071 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:09,071 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:09,071 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:09,071 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:09,071 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:09,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741834_1010 (size=1152) 2024-11-11T04:37:09,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741834_1010 (size=1152) 2024-11-11T04:37:09,076 DEBUG [RS:0;a7bef91497aa:42693 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/oldWALs 2024-11-11T04:37:09,076 INFO [RS:0;a7bef91497aa:42693 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7bef91497aa%2C42693%2C1731299828016.meta:.meta(num 1731299828752) 2024-11-11T04:37:09,076 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:09,076 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:09,076 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:09,076 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:09,076 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T04:37:09,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741833_1009 (size=93) 2024-11-11T04:37:09,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741833_1009 (size=93) 2024-11-11T04:37:09,080 DEBUG [RS:0;a7bef91497aa:42693 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/oldWALs 2024-11-11T04:37:09,080 INFO [RS:0;a7bef91497aa:42693 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7bef91497aa%2C42693%2C1731299828016:(num 1731299828391) 2024-11-11T04:37:09,080 DEBUG [RS:0;a7bef91497aa:42693 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:37:09,080 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:37:09,080 INFO [RS:0;a7bef91497aa:42693 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T04:37:09,080 INFO [RS:0;a7bef91497aa:42693 {}] hbase.ChoreService(370): Chore service for: regionserver/a7bef91497aa:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T04:37:09,080 INFO [RS:0;a7bef91497aa:42693 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T04:37:09,080 INFO [regionserver/a7bef91497aa:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T04:37:09,080 INFO [RS:0;a7bef91497aa:42693 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42693 2024-11-11T04:37:09,082 INFO [RS:0;a7bef91497aa:42693 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T04:37:09,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:37:09,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a7bef91497aa,42693,1731299828016 2024-11-11T04:37:09,084 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a7bef91497aa,42693,1731299828016] 2024-11-11T04:37:09,085 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a7bef91497aa,42693,1731299828016 already deleted, retry=false 2024-11-11T04:37:09,085 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a7bef91497aa,42693,1731299828016 expired; onlineServers=0 2024-11-11T04:37:09,085 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a7bef91497aa,34583,1731299827971' ***** 2024-11-11T04:37:09,085 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T04:37:09,086 INFO [M:0;a7bef91497aa:34583 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T04:37:09,086 INFO [M:0;a7bef91497aa:34583 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T04:37:09,086 DEBUG [M:0;a7bef91497aa:34583 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T04:37:09,086 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T04:37:09,086 DEBUG [M:0;a7bef91497aa:34583 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T04:37:09,086 DEBUG [master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299828156 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.large.0-1731299828156,5,FailOnTimeoutGroup] 2024-11-11T04:37:09,086 DEBUG [master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299828156 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7bef91497aa:0:becomeActiveMaster-HFileCleaner.small.0-1731299828156,5,FailOnTimeoutGroup] 2024-11-11T04:37:09,086 INFO [M:0;a7bef91497aa:34583 {}] hbase.ChoreService(370): Chore service for: master/a7bef91497aa:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T04:37:09,086 INFO [M:0;a7bef91497aa:34583 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T04:37:09,086 DEBUG [M:0;a7bef91497aa:34583 {}] master.HMaster(1795): Stopping service threads 2024-11-11T04:37:09,086 INFO [M:0;a7bef91497aa:34583 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T04:37:09,086 INFO [M:0;a7bef91497aa:34583 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T04:37:09,086 INFO [M:0;a7bef91497aa:34583 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T04:37:09,086 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T04:37:09,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T04:37:09,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:37:09,088 DEBUG [M:0;a7bef91497aa:34583 {}] zookeeper.ZKUtil(347): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T04:37:09,088 WARN [M:0;a7bef91497aa:34583 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T04:37:09,088 INFO [M:0;a7bef91497aa:34583 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/.lastflushedseqids 2024-11-11T04:37:09,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741838_1014 (size=99) 2024-11-11T04:37:09,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741838_1014 (size=99) 2024-11-11T04:37:09,096 INFO [M:0;a7bef91497aa:34583 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T04:37:09,096 INFO [M:0;a7bef91497aa:34583 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T04:37:09,096 DEBUG [M:0;a7bef91497aa:34583 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T04:37:09,096 INFO [M:0;a7bef91497aa:34583 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:37:09,096 DEBUG [M:0;a7bef91497aa:34583 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:37:09,096 DEBUG [M:0;a7bef91497aa:34583 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T04:37:09,096 DEBUG [M:0;a7bef91497aa:34583 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:37:09,096 INFO [M:0;a7bef91497aa:34583 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-11T04:37:09,111 DEBUG [M:0;a7bef91497aa:34583 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1daccff6542b4c59ac397d8238bd8090 is 82, key is hbase:meta,,1/info:regioninfo/1731299828782/Put/seqid=0 2024-11-11T04:37:09,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741839_1015 (size=5672) 2024-11-11T04:37:09,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741839_1015 (size=5672) 2024-11-11T04:37:09,116 INFO [M:0;a7bef91497aa:34583 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1daccff6542b4c59ac397d8238bd8090 2024-11-11T04:37:09,133 DEBUG [M:0;a7bef91497aa:34583 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d94e24ce94d6493eb2519a5ffa79aa4a is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731299828799/Put/seqid=0 2024-11-11T04:37:09,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741840_1016 (size=5275) 2024-11-11T04:37:09,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741840_1016 (size=5275) 2024-11-11T04:37:09,138 INFO [M:0;a7bef91497aa:34583 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d94e24ce94d6493eb2519a5ffa79aa4a 2024-11-11T04:37:09,157 DEBUG [M:0;a7bef91497aa:34583 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c92842611ed044fbb3e68b93e6d019ef is 69, key is a7bef91497aa,42693,1731299828016/rs:state/1731299828249/Put/seqid=0 2024-11-11T04:37:09,161 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741841_1017 (size=5156) 2024-11-11T04:37:09,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741841_1017 (size=5156) 2024-11-11T04:37:09,162 INFO [M:0;a7bef91497aa:34583 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c92842611ed044fbb3e68b93e6d019ef 2024-11-11T04:37:09,179 DEBUG [M:0;a7bef91497aa:34583 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/009ee26109de4463bac5db67367124ab is 52, key is load_balancer_on/state:d/1731299828839/Put/seqid=0 2024-11-11T04:37:09,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741842_1018 (size=5056) 2024-11-11T04:37:09,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741842_1018 (size=5056) 2024-11-11T04:37:09,183 INFO [M:0;a7bef91497aa:34583 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/009ee26109de4463bac5db67367124ab 2024-11-11T04:37:09,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:37:09,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42693-0x101959e565f0001, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T04:37:09,184 INFO [RS:0;a7bef91497aa:42693 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T04:37:09,184 INFO [RS:0;a7bef91497aa:42693 {}] regionserver.HRegionServer(1031): Exiting; stopping=a7bef91497aa,42693,1731299828016; zookeeper connection closed. 
2024-11-11T04:37:09,185 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@173c2888 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@173c2888
2024-11-11T04:37:09,185 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-11T04:37:09,188 DEBUG [M:0;a7bef91497aa:34583 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1daccff6542b4c59ac397d8238bd8090 as hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1daccff6542b4c59ac397d8238bd8090
2024-11-11T04:37:09,192 INFO [M:0;a7bef91497aa:34583 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1daccff6542b4c59ac397d8238bd8090, entries=8, sequenceid=29, filesize=5.5 K
2024-11-11T04:37:09,192 DEBUG [M:0;a7bef91497aa:34583 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d94e24ce94d6493eb2519a5ffa79aa4a as hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d94e24ce94d6493eb2519a5ffa79aa4a
2024-11-11T04:37:09,196 INFO [M:0;a7bef91497aa:34583 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d94e24ce94d6493eb2519a5ffa79aa4a, entries=3, sequenceid=29, filesize=5.2 K
2024-11-11T04:37:09,197 DEBUG [M:0;a7bef91497aa:34583 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c92842611ed044fbb3e68b93e6d019ef as hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c92842611ed044fbb3e68b93e6d019ef
2024-11-11T04:37:09,200 INFO [M:0;a7bef91497aa:34583 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c92842611ed044fbb3e68b93e6d019ef, entries=1, sequenceid=29, filesize=5.0 K
2024-11-11T04:37:09,201 DEBUG [M:0;a7bef91497aa:34583 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/009ee26109de4463bac5db67367124ab as hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/009ee26109de4463bac5db67367124ab
2024-11-11T04:37:09,205 INFO [M:0;a7bef91497aa:34583 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42503/user/jenkins/test-data/f01b0935-f454-2aee-3762-cc3d9126fc44/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/009ee26109de4463bac5db67367124ab, entries=1, sequenceid=29, filesize=4.9 K
2024-11-11T04:37:09,206 INFO [M:0;a7bef91497aa:34583 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=29, compaction requested=false
2024-11-11T04:37:09,208 INFO [M:0;a7bef91497aa:34583 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-11T04:37:09,208 DEBUG [M:0;a7bef91497aa:34583 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731299829096Disabling compacts and flushes for region at 1731299829096Disabling writes for close at 1731299829096Obtaining lock to block concurrent updates at 1731299829096Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731299829096Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731299829096Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731299829097 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731299829097Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731299829111 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731299829111Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731299829120 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731299829133 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731299829133Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731299829141 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731299829156 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731299829156Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731299829165 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731299829178 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731299829178Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45b5f40c: reopening flushed file at 1731299829187 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2822efb: reopening flushed file at 1731299829192 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24e211c1: reopening flushed file at 1731299829196 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32db4e79: reopening flushed file at 1731299829201 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=29, compaction requested=false at 1731299829206 (+5 ms)Writing region close event to WAL at 1731299829208 (+2 ms)Closed at 1731299829208
2024-11-11T04:37:09,208 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T04:37:09,208 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T04:37:09,208 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T04:37:09,209 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T04:37:09,209 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T04:37:09,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42341 is added to blk_1073741830_1006 (size=10311)
2024-11-11T04:37:09,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39623 is added to blk_1073741830_1006 (size=10311)
2024-11-11T04:37:09,211 INFO [M:0;a7bef91497aa:34583 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-11T04:37:09,211 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-11T04:37:09,211 INFO [M:0;a7bef91497aa:34583 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34583
2024-11-11T04:37:09,211 INFO [M:0;a7bef91497aa:34583 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-11T04:37:09,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:37:09,314 INFO [M:0;a7bef91497aa:34583 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-11T04:37:09,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34583-0x101959e565f0000, quorum=127.0.0.1:62865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:37:09,316 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6148e76b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T04:37:09,316 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@352a3917{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T04:37:09,317 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T04:37:09,317 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e56b299{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T04:37:09,317 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f33170f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/hadoop.log.dir/,STOPPED}
2024-11-11T04:37:09,318 WARN [BP-2009007627-172.17.0.2-1731299827350 heartbeating to localhost/127.0.0.1:42503 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T04:37:09,318 WARN [BP-2009007627-172.17.0.2-1731299827350 heartbeating to localhost/127.0.0.1:42503 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2009007627-172.17.0.2-1731299827350 (Datanode Uuid a65446cb-4a0e-4a18-bf88-9efaa4f46e6d) service to localhost/127.0.0.1:42503
2024-11-11T04:37:09,318 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T04:37:09,318 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T04:37:09,319 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/cluster_9aa1663f-04a2-2473-29c0-16199c4a7eb2/data/data3/current/BP-2009007627-172.17.0.2-1731299827350 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T04:37:09,319 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/cluster_9aa1663f-04a2-2473-29c0-16199c4a7eb2/data/data4/current/BP-2009007627-172.17.0.2-1731299827350 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T04:37:09,319 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T04:37:09,321 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2c0b3275{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T04:37:09,321 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5bfccd5{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T04:37:09,321 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T04:37:09,321 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1391b748{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T04:37:09,321 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b443d30{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/hadoop.log.dir/,STOPPED}
2024-11-11T04:37:09,322 WARN [BP-2009007627-172.17.0.2-1731299827350 heartbeating to localhost/127.0.0.1:42503 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T04:37:09,322 WARN [BP-2009007627-172.17.0.2-1731299827350 heartbeating to localhost/127.0.0.1:42503 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2009007627-172.17.0.2-1731299827350 (Datanode Uuid 0f90b3cf-520e-4972-8f8a-5d20f334aa24) service to localhost/127.0.0.1:42503
2024-11-11T04:37:09,322 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T04:37:09,322 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T04:37:09,323 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/cluster_9aa1663f-04a2-2473-29c0-16199c4a7eb2/data/data1/current/BP-2009007627-172.17.0.2-1731299827350 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T04:37:09,323 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T04:37:09,323 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/cluster_9aa1663f-04a2-2473-29c0-16199c4a7eb2/data/data2/current/BP-2009007627-172.17.0.2-1731299827350 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T04:37:09,328 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@61290c18{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-11T04:37:09,328 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3287f588{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T04:37:09,329 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T04:37:09,329 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44711d59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T04:37:09,329 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43f8fe5e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6c163589-cec0-7352-bd87-a2f16d3d09b9/hadoop.log.dir/,STOPPED}
2024-11-11T04:37:09,335 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-11T04:37:09,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-11T04:37:09,356 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=269 (was 231)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42503 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:42503
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: globalEventExecutor-1-21
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113)
    app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:42503
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42503 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42503
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42503
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42503
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42503 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=538 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=22 (was 24), ProcessCount=11 (was 11), AvailableMemoryMB=6537 (was 6546)